From af75078fece3615088e561357c1e97603e43a5fe Mon Sep 17 00:00:00 2001 From: Intel Date: Tue, 4 Sep 2012 13:54:00 +0100 Subject: [PATCH] first public release version 1.2.3 Signed-off-by: Intel --- Makefile | 47 + app/Makefile | 42 + app/chkincs/Makefile | 96 + app/chkincs/test.c | 50 + app/chkincs/test.h | 90 + app/chkincs/test_alarm.c | 53 + app/chkincs/test_atomic.c | 93 + app/chkincs/test_branch_prediction.c | 58 + app/chkincs/test_byteorder.c | 84 + app/chkincs/test_common.c | 76 + app/chkincs/test_cpuflags.c | 53 + app/chkincs/test_cycles.c | 63 + app/chkincs/test_debug.c | 55 + app/chkincs/test_eal.c | 52 + app/chkincs/test_errno.c | 54 + app/chkincs/test_ethdev.c | 72 + app/chkincs/test_ether.c | 52 + app/chkincs/test_fbk_hash.c | 53 + app/chkincs/test_hash.c | 85 + app/chkincs/test_hash_crc.c | 52 + app/chkincs/test_interrupts.c | 53 + app/chkincs/test_ip.c | 53 + app/chkincs/test_jhash.c | 54 + app/chkincs/test_launch.c | 68 + app/chkincs/test_lcore.c | 66 + app/chkincs/test_log.c | 58 + app/chkincs/test_lpm.c | 64 + app/chkincs/test_malloc.c | 57 + app/chkincs/test_mbuf.c | 110 + app/chkincs/test_memcpy.c | 58 + app/chkincs/test_memory.c | 65 + app/chkincs/test_mempool.c | 111 + app/chkincs/test_memzone.c | 61 + app/chkincs/test_pci.c | 86 + app/chkincs/test_pci_dev_ids.c | 60 + app/chkincs/test_per_lcore.c | 57 + app/chkincs/test_prefetch.c | 58 + app/chkincs/test_random.c | 54 + app/chkincs/test_ring.c | 97 + app/chkincs/test_rwlock.c | 60 + app/chkincs/test_sctp.c | 52 + app/chkincs/test_spinlock.c | 59 + app/chkincs/test_string_fns.c | 52 + app/chkincs/test_tailq.c | 55 + app/chkincs/test_tcp.c | 52 + app/chkincs/test_timer.c | 74 + app/chkincs/test_udp.c | 52 + app/chkincs/test_version.c | 52 + app/dump_cfg/Makefile | 49 + app/dump_cfg/dump_cfg_main.c | 229 + app/test-pmd/Makefile | 63 + app/test-pmd/cmdline.c | 2180 ++ app/test-pmd/config.c | 1142 + app/test-pmd/csumonly.c | 449 + app/test-pmd/ieee1588fwd.c | 657 + app/test-pmd/iofwd.c | 131 + app/test-pmd/macfwd.c | 148 + app/test-pmd/parameters.c | 646 + app/test-pmd/rxonly.c | 194 + app/test-pmd/testpmd.c | 1105 + app/test-pmd/testpmd.h | 413 + app/test-pmd/txonly.c | 317 + app/test/Makefile | 82 + app/test/autotest.py | 664 + app/test/commands.c | 391 + app/test/graph_mempool.py | 193 + app/test/graph_ring.py | 201 + app/test/process.h | 89 + app/test/test.c | 153 + app/test/test.h | 85 + app/test/test_alarm.c | 258 + app/test/test_atomic.c | 381 + app/test/test_byteorder.c | 97 + app/test/test_cpuflags.c | 134 + app/test/test_cycles.c | 94 + app/test/test_debug.c | 150 + app/test/test_eal_flags.c | 303 + app/test/test_errno.c | 110 + app/test/test_hash.c | 1785 + app/test/test_interrupts.c | 419 + app/test/test_logs.c | 96 + app/test/test_lpm.c | 1365 + app/test/test_lpm_routes.h | 28947 ++++++++++++++++ app/test/test_malloc.c | 776 + app/test/test_mbuf.c | 875 + app/test/test_memcpy.c | 429 + app/test/test_memory.c | 92 + app/test/test_mempool.c | 707 + app/test/test_memzone.c | 639 + app/test/test_mp_secondary.c | 236 + app/test/test_pci.c | 192 + app/test/test_per_lcore.c | 142 + app/test/test_prefetch.c | 63 + app/test/test_ring.c | 987 + app/test/test_rwlock.c | 135 + app/test/test_spinlock.c | 318 + app/test/test_string_fns.c | 305 + app/test/test_tailq.c | 125 + app/test/test_timer.c | 363 + app/test/test_version.c | 59 + config/defconfig_i686-default-linuxapp-gcc | 240 + config/defconfig_i686-default-linuxapp-icc | 230 + config/defconfig_x86_64-default-linuxapp-gcc | 240 + 
config/defconfig_x86_64-default-linuxapp-icc | 230 + ...482246_CmdLine_Sample_App_Guide_Rev1.1.pdf | Bin 0 -> 53739 bytes examples/cmdline/Makefile | 52 + examples/cmdline/commands.c | 282 + examples/cmdline/commands.h | 41 + examples/cmdline/main.c | 100 + examples/cmdline/main.h | 47 + examples/cmdline/parse_obj_list.c | 164 + examples/cmdline/parse_obj_list.h | 113 + ...st_in_DPDK_Env_Sample_App_Guide_Rev1.0.pdf | Bin 0 -> 99638 bytes examples/dpdk_qat/Makefile | 81 + .../shumway_B0/dh89xxcc_qa_dev0.conf | 537 + .../shumway_B0/dh89xxcc_qa_dev1.conf | 537 + .../stargo_B0/dh89xxcc_qa_dev0.conf | 409 + examples/dpdk_qat/crypto.c | 921 + examples/dpdk_qat/crypto.h | 88 + examples/dpdk_qat/main.c | 857 + examples/dpdk_qat/main.h | 47 + ..._ExceptionPath_Sample_App_Guide_Rev1.1.pdf | Bin 0 -> 77734 bytes examples/exception_path/Makefile | 57 + examples/exception_path/main.c | 569 + ...249_HelloWorld_Sample_App_Guide_Rev1.1.pdf | Bin 0 -> 48353 bytes examples/helloworld/Makefile | 52 + examples/helloworld/main.c | 82 + examples/helloworld/main.h | 47 + ...0761_IPv4_Frag_Sample_App_Guide_Rev1.0.pdf | Bin 0 -> 67178 bytes examples/ipv4_frag/Makefile | 63 + examples/ipv4_frag/main.c | 707 + examples/ipv4_frag/main.h | 48 + examples/ipv4_frag/rte_ipv4_frag.h | 253 + ...IPv4_Multicast_Sample_App_Guide_Rev1.0.pdf | Bin 0 -> 109644 bytes examples/ipv4_multicast/Makefile | 63 + examples/ipv4_multicast/main.c | 834 + examples/ipv4_multicast/main.h | 48 + ...arding_VirtEnv_Sample_App_Guide_Rev1.0.pdf | Bin 0 -> 70953 bytes examples/l2fwd-vf/Makefile | 53 + examples/l2fwd-vf/main.c | 708 + examples/l2fwd-vf/main.h | 47 + ...0_L2Forwarding_Sample_App_Guide_Rev1.1.pdf | Bin 0 -> 80691 bytes examples/l2fwd/Makefile | 52 + examples/l2fwd/main.c | 745 + examples/l2fwd/main.h | 47 + ...arding_VirtEnv_Sample_App_Guide_Rev1.0.pdf | Bin 0 -> 52089 bytes examples/l3fwd-vf/Makefile | 58 + examples/l3fwd-vf/main.c | 1079 + examples/l3fwd-vf/main.h | 47 + ...1_L3Forwarding_Sample_App_Guide_Rev1.2.pdf | Bin 0 -> 61714 bytes examples/l3fwd/Makefile | 58 + examples/l3fwd/main.c | 1118 + examples/l3fwd/main.h | 47 + ...atus_Interrupt_Sample_App_Guide_Rev1.0.pdf | Bin 0 -> 70985 bytes examples/link_status_interrupt/Makefile | 52 + examples/link_status_interrupt/main.c | 792 + examples/link_status_interrupt/main.h | 47 + ...2_LoadBalancer_Sample_App_Guide_Rev1.1.pdf | Bin 0 -> 62413 bytes examples/load_balancer/Makefile | 58 + examples/load_balancer/config.c | 1058 + examples/load_balancer/init.c | 507 + examples/load_balancer/main.c | 112 + examples/load_balancer/main.h | 377 + examples/load_balancer/runtime.c | 669 + ..._Multi_Process_Sample_App_Guide_Rev1.3.pdf | Bin 0 -> 144607 bytes examples/multi_process/Makefile | 49 + .../multi_process/client_server_mp/Makefile | 49 + .../client_server_mp/mp_client/Makefile | 50 + .../client_server_mp/mp_client/client.c | 294 + .../client_server_mp/mp_server/Makefile | 63 + .../client_server_mp/mp_server/args.c | 175 + .../client_server_mp/mp_server/args.h | 41 + .../client_server_mp/mp_server/init.c | 304 + .../client_server_mp/mp_server/init.h | 74 + .../client_server_mp/mp_server/main.c | 330 + .../client_server_mp/mp_server/main.h | 50 + .../client_server_mp/shared/common.h | 89 + .../client_server_mp/shared/init_drivers.h | 58 + examples/multi_process/simple_mp/Makefile | 52 + examples/multi_process/simple_mp/main.c | 160 + .../multi_process/simple_mp/mp_commands.c | 169 + .../multi_process/simple_mp/mp_commands.h | 46 + examples/multi_process/symmetric_mp/Makefile | 52 + 
examples/multi_process/symmetric_mp/main.c | 471 + .../482254_Timer_Sample_App_Guide_Rev1.1.pdf | Bin 0 -> 52612 bytes examples/timer/Makefile | 58 + examples/timer/main.c | 156 + examples/timer/main.h | 47 + ...VMDQ_DCB_L2Fwd_Sample_App_Guide_Rev1.1.pdf | Bin 0 -> 71737 bytes examples/vmdq_dcb/Makefile | 59 + examples/vmdq_dcb/main.c | 331 + examples/vmdq_dcb/main.h | 48 + lib/Makefile | 51 + lib/librte_cmdline/Makefile | 65 + lib/librte_cmdline/cmdline.c | 240 + lib/librte_cmdline/cmdline.h | 94 + lib/librte_cmdline/cmdline_cirbuf.c | 434 + lib/librte_cmdline/cmdline_cirbuf.h | 248 + lib/librte_cmdline/cmdline_parse.c | 544 + lib/librte_cmdline/cmdline_parse.h | 188 + lib/librte_cmdline/cmdline_parse_etheraddr.c | 172 + lib/librte_cmdline/cmdline_parse_etheraddr.h | 102 + lib/librte_cmdline/cmdline_parse_ipaddr.c | 383 + lib/librte_cmdline/cmdline_parse_ipaddr.h | 194 + lib/librte_cmdline/cmdline_parse_num.c | 493 + lib/librte_cmdline/cmdline_parse_num.h | 119 + lib/librte_cmdline/cmdline_parse_portlist.c | 172 + lib/librte_cmdline/cmdline_parse_portlist.h | 113 + lib/librte_cmdline/cmdline_parse_string.c | 228 + lib/librte_cmdline/cmdline_parse_string.h | 113 + lib/librte_cmdline/cmdline_rdline.c | 675 + lib/librte_cmdline/cmdline_rdline.h | 260 + lib/librte_cmdline/cmdline_socket.c | 120 + lib/librte_cmdline/cmdline_socket.h | 78 + lib/librte_cmdline/cmdline_vt100.c | 182 + lib/librte_cmdline/cmdline_vt100.h | 153 + lib/librte_eal/Makefile | 41 + lib/librte_eal/common/Makefile | 56 + lib/librte_eal/common/eal_common_cpuflags.c | 265 + lib/librte_eal/common/eal_common_errno.c | 72 + lib/librte_eal/common/eal_common_launch.c | 122 + lib/librte_eal/common/eal_common_log.c | 390 + lib/librte_eal/common/eal_common_memory.c | 116 + lib/librte_eal/common/eal_common_memzone.c | 376 + lib/librte_eal/common/eal_common_pci.c | 145 + lib/librte_eal/common/eal_common_tailqs.c | 113 + lib/librte_eal/common/include/eal_private.h | 176 + .../common/include/i686/arch/rte_atomic.h | 959 + lib/librte_eal/common/include/rte_alarm.h | 100 + lib/librte_eal/common/include/rte_atomic.h | 657 + .../common/include/rte_branch_prediction.h | 72 + lib/librte_eal/common/include/rte_byteorder.h | 244 + lib/librte_eal/common/include/rte_common.h | 310 + lib/librte_eal/common/include/rte_cpuflags.h | 174 + lib/librte_eal/common/include/rte_cycles.h | 120 + lib/librte_eal/common/include/rte_debug.h | 96 + lib/librte_eal/common/include/rte_eal.h | 174 + lib/librte_eal/common/include/rte_errno.h | 98 + .../common/include/rte_interrupts.h | 123 + lib/librte_eal/common/include/rte_launch.h | 179 + lib/librte_eal/common/include/rte_lcore.h | 191 + lib/librte_eal/common/include/rte_log.h | 290 + lib/librte_eal/common/include/rte_memcpy.h | 355 + lib/librte_eal/common/include/rte_memory.h | 143 + lib/librte_eal/common/include/rte_memzone.h | 200 + lib/librte_eal/common/include/rte_pci.h | 197 + .../common/include/rte_pci_dev_ids.h | 205 + lib/librte_eal/common/include/rte_per_lcore.h | 81 + lib/librte_eal/common/include/rte_prefetch.h | 90 + lib/librte_eal/common/include/rte_random.h | 93 + lib/librte_eal/common/include/rte_rwlock.h | 174 + lib/librte_eal/common/include/rte_spinlock.h | 243 + .../common/include/rte_string_fns.h | 165 + lib/librte_eal/common/include/rte_tailq.h | 146 + lib/librte_eal/common/include/rte_version.h | 85 + lib/librte_eal/common/include/rte_warnings.h | 88 + .../common/include/x86_64/arch/rte_atomic.h | 943 + lib/librte_eal/linuxapp/Makefile | 39 + lib/librte_eal/linuxapp/eal/Makefile | 91 + 
lib/librte_eal/linuxapp/eal/eal.c | 620 + lib/librte_eal/linuxapp/eal/eal_alarm.c | 232 + lib/librte_eal/linuxapp/eal/eal_debug.c | 114 + lib/librte_eal/linuxapp/eal/eal_hpet.c | 232 + .../linuxapp/eal/eal_hugepage_info.c | 229 + lib/librte_eal/linuxapp/eal/eal_interrupts.c | 540 + lib/librte_eal/linuxapp/eal/eal_lcore.c | 192 + lib/librte_eal/linuxapp/eal/eal_log.c | 137 + lib/librte_eal/linuxapp/eal/eal_memory.c | 796 + lib/librte_eal/linuxapp/eal/eal_pci.c | 770 + lib/librte_eal/linuxapp/eal/eal_thread.c | 237 + .../linuxapp/eal/include/eal_fs_paths.h | 96 + .../linuxapp/eal/include/eal_hugepages.h | 62 + .../linuxapp/eal/include/eal_internal_cfg.h | 76 + .../linuxapp/eal/include/eal_thread.h | 55 + .../eal/include/exec-env/rte_interrupts.h | 56 + .../linuxapp/eal/include/exec-env/rte_lcore.h | 92 + .../eal/include/exec-env/rte_per_lcore.h | 69 + lib/librte_eal/linuxapp/igb_uio/Makefile | 55 + lib/librte_eal/linuxapp/igb_uio/igb_uio.c | 402 + lib/librte_ether/Makefile | 55 + lib/librte_ether/rte_ethdev.c | 1381 + lib/librte_ether/rte_ethdev.h | 1809 + lib/librte_ether/rte_ether.h | 256 + lib/librte_hash/Makefile | 55 + lib/librte_hash/rte_fbk_hash.c | 210 + lib/librte_hash/rte_fbk_hash.h | 334 + lib/librte_hash/rte_hash.c | 407 + lib/librte_hash/rte_hash.h | 236 + lib/librte_hash/rte_hash_crc.h | 114 + lib/librte_hash/rte_jhash.h | 263 + lib/librte_lpm/Makefile | 51 + lib/librte_lpm/rte_lpm.c | 971 + lib/librte_lpm/rte_lpm.h | 288 + lib/librte_malloc/Makefile | 50 + lib/librte_malloc/malloc_elem.c | 280 + lib/librte_malloc/malloc_elem.h | 177 + lib/librte_malloc/malloc_heap.c | 181 + lib/librte_malloc/malloc_heap.h | 68 + lib/librte_malloc/rte_malloc.c | 166 + lib/librte_malloc/rte_malloc.h | 212 + lib/librte_mbuf/Makefile | 50 + lib/librte_mbuf/rte_mbuf.c | 252 + lib/librte_mbuf/rte_mbuf.h | 1019 + lib/librte_mempool/Makefile | 50 + lib/librte_mempool/rte_mempool.c | 491 + lib/librte_mempool/rte_mempool.h | 1087 + lib/librte_net/Makefile | 42 + lib/librte_net/rte_ip.h | 255 + lib/librte_net/rte_sctp.h | 101 + lib/librte_net/rte_tcp.h | 106 + lib/librte_net/rte_udp.h | 101 + lib/librte_pmd_igb/Makefile | 64 + lib/librte_pmd_igb/e1000_ethdev.c | 1319 + lib/librte_pmd_igb/e1000_ethdev.h | 117 + lib/librte_pmd_igb/e1000_logs.h | 74 + lib/librte_pmd_igb/e1000_rxtx.c | 1859 + lib/librte_pmd_igb/igb/README | 74 + lib/librte_pmd_igb/igb/e1000_82575.c | 2429 ++ lib/librte_pmd_igb/igb/e1000_82575.h | 487 + lib/librte_pmd_igb/igb/e1000_api.c | 1152 + lib/librte_pmd_igb/igb/e1000_api.h | 156 + lib/librte_pmd_igb/igb/e1000_defines.h | 1733 + lib/librte_pmd_igb/igb/e1000_hw.h | 767 + lib/librte_pmd_igb/igb/e1000_mac.c | 2170 ++ lib/librte_pmd_igb/igb/e1000_mac.h | 95 + lib/librte_pmd_igb/igb/e1000_manage.c | 472 + lib/librte_pmd_igb/igb/e1000_manage.h | 90 + lib/librte_pmd_igb/igb/e1000_mbx.c | 764 + lib/librte_pmd_igb/igb/e1000_mbx.h | 106 + lib/librte_pmd_igb/igb/e1000_nvm.c | 1071 + lib/librte_pmd_igb/igb/e1000_nvm.h | 66 + lib/librte_pmd_igb/igb/e1000_osdep.c | 72 + lib/librte_pmd_igb/igb/e1000_osdep.h | 128 + lib/librte_pmd_igb/igb/e1000_phy.c | 2988 ++ lib/librte_pmd_igb/igb/e1000_phy.h | 217 + lib/librte_pmd_igb/igb/e1000_regs.h | 574 + lib/librte_pmd_igb/igb/e1000_vf.c | 574 + lib/librte_pmd_igb/igb/e1000_vf.h | 294 + lib/librte_pmd_igb/igb/if_igb.c | 5567 +++ lib/librte_pmd_igb/igb/if_igb.h | 541 + lib/librte_pmd_ixgbe/Makefile | 65 + lib/librte_pmd_ixgbe/ixgbe/README | 70 + lib/librte_pmd_ixgbe/ixgbe/ixgbe.c | 5442 +++ lib/librte_pmd_ixgbe/ixgbe/ixgbe.h | 521 + 
lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c | 1402 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c | 2281 ++ lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c | 1130 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h | 168 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c | 4049 +++ lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h | 135 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c | 751 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h | 112 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h | 145 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c | 1843 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h | 141 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h | 3138 ++ lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c | 524 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h | 113 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c | 989 + lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h | 42 + lib/librte_pmd_ixgbe/ixgbe/ixv.c | 4010 +++ lib/librte_pmd_ixgbe/ixgbe/ixv.h | 430 + lib/librte_pmd_ixgbe/ixgbe_ethdev.c | 1609 + lib/librte_pmd_ixgbe/ixgbe_ethdev.h | 176 + lib/librte_pmd_ixgbe/ixgbe_fdir.c | 891 + lib/librte_pmd_ixgbe/ixgbe_logs.h | 76 + lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 2445 ++ lib/librte_ring/Makefile | 50 + lib/librte_ring/rte_ring.c | 283 + lib/librte_ring/rte_ring.h | 830 + lib/librte_timer/Makefile | 50 + lib/librte_timer/rte_timer.c | 506 + lib/librte_timer/rte_timer.h | 332 + mk/arch/i686/rte.vars.mk | 59 + mk/arch/x86_64/rte.vars.mk | 59 + mk/exec-env/linuxapp/rte.app.mk | 38 + mk/exec-env/linuxapp/rte.vars.mk | 52 + mk/internal/rte.build-post.mk | 64 + mk/internal/rte.build-pre.mk | 34 + mk/internal/rte.clean-post.mk | 64 + mk/internal/rte.clean-pre.mk | 34 + mk/internal/rte.compile-post.mk | 35 + mk/internal/rte.compile-pre.mk | 178 + mk/internal/rte.depdirs-post.mk | 44 + mk/internal/rte.depdirs-pre.mk | 34 + mk/internal/rte.exthelp-post.mk | 41 + mk/internal/rte.install-post.mk | 101 + mk/internal/rte.install-pre.mk | 62 + mk/machine/atm/rte.vars.mk | 61 + mk/machine/default/rte.vars.mk | 61 + mk/machine/ivb/rte.vars.mk | 61 + mk/machine/native/rte.vars.mk | 111 + mk/machine/nhm/rte.vars.mk | 61 + mk/machine/snb/rte.vars.mk | 61 + mk/machine/wsm/rte.vars.mk | 61 + mk/rte.app.mk | 236 + mk/rte.doc.mk | 127 + mk/rte.extapp.mk | 56 + mk/rte.extlib.mk | 56 + mk/rte.extobj.mk | 56 + mk/rte.extvars.mk | 83 + mk/rte.gnuconfigure.mk | 76 + mk/rte.hostapp.mk | 125 + mk/rte.hostlib.mk | 118 + mk/rte.install.mk | 60 + mk/rte.lib.mk | 116 + mk/rte.module.mk | 117 + mk/rte.obj.mk | 114 + mk/rte.sdkbuild.mk | 102 + mk/rte.sdkconfig.mk | 109 + mk/rte.sdkdepdirs.mk | 65 + mk/rte.sdkdoc.mk | 73 + mk/rte.sdkgcov.mk | 69 + mk/rte.sdkinstall.mk | 76 + mk/rte.sdkroot.mk | 158 + mk/rte.sdktest.mk | 66 + mk/rte.sdktestall.mk | 65 + mk/rte.subdir.mk | 114 + mk/rte.vars.mk | 125 + mk/target/generic/rte.app.mk | 43 + mk/target/generic/rte.vars.mk | 150 + mk/toolchain/gcc/rte.toolchain-compat.mk | 93 + mk/toolchain/gcc/rte.vars.mk | 87 + mk/toolchain/icc/rte.toolchain-compat.mk | 82 + mk/toolchain/icc/rte.vars.mk | 98 + scripts/Makefile | 38 + scripts/depdirs-rule.sh | 97 + scripts/gen-build-mk.sh | 55 + scripts/gen-config-h.sh | 41 + scripts/import_autotest.sh | 87 + scripts/relpath.sh | 100 + scripts/test-framework.sh | 133 + scripts/testhost/Makefile | 50 + scripts/testhost/testhost.c | 57 + tools/setup.sh | 420 + 435 files changed, 169022 insertions(+) create mode 100644 Makefile create mode 100644 app/Makefile create mode 100644 app/chkincs/Makefile create mode 100644 app/chkincs/test.c create mode 100644 app/chkincs/test.h create mode 100644 app/chkincs/test_alarm.c create mode 100644 app/chkincs/test_atomic.c create 
mode 100644 app/chkincs/test_branch_prediction.c create mode 100644 app/chkincs/test_byteorder.c create mode 100644 app/chkincs/test_common.c create mode 100644 app/chkincs/test_cpuflags.c create mode 100644 app/chkincs/test_cycles.c create mode 100644 app/chkincs/test_debug.c create mode 100644 app/chkincs/test_eal.c create mode 100644 app/chkincs/test_errno.c create mode 100644 app/chkincs/test_ethdev.c create mode 100644 app/chkincs/test_ether.c create mode 100644 app/chkincs/test_fbk_hash.c create mode 100644 app/chkincs/test_hash.c create mode 100644 app/chkincs/test_hash_crc.c create mode 100644 app/chkincs/test_interrupts.c create mode 100644 app/chkincs/test_ip.c create mode 100644 app/chkincs/test_jhash.c create mode 100644 app/chkincs/test_launch.c create mode 100644 app/chkincs/test_lcore.c create mode 100644 app/chkincs/test_log.c create mode 100644 app/chkincs/test_lpm.c create mode 100644 app/chkincs/test_malloc.c create mode 100644 app/chkincs/test_mbuf.c create mode 100644 app/chkincs/test_memcpy.c create mode 100644 app/chkincs/test_memory.c create mode 100644 app/chkincs/test_mempool.c create mode 100644 app/chkincs/test_memzone.c create mode 100644 app/chkincs/test_pci.c create mode 100644 app/chkincs/test_pci_dev_ids.c create mode 100644 app/chkincs/test_per_lcore.c create mode 100644 app/chkincs/test_prefetch.c create mode 100644 app/chkincs/test_random.c create mode 100644 app/chkincs/test_ring.c create mode 100644 app/chkincs/test_rwlock.c create mode 100644 app/chkincs/test_sctp.c create mode 100644 app/chkincs/test_spinlock.c create mode 100644 app/chkincs/test_string_fns.c create mode 100644 app/chkincs/test_tailq.c create mode 100644 app/chkincs/test_tcp.c create mode 100644 app/chkincs/test_timer.c create mode 100644 app/chkincs/test_udp.c create mode 100644 app/chkincs/test_version.c create mode 100644 app/dump_cfg/Makefile create mode 100644 app/dump_cfg/dump_cfg_main.c create mode 100644 app/test-pmd/Makefile create mode 100644 app/test-pmd/cmdline.c create mode 100644 app/test-pmd/config.c create mode 100644 app/test-pmd/csumonly.c create mode 100644 app/test-pmd/ieee1588fwd.c create mode 100644 app/test-pmd/iofwd.c create mode 100644 app/test-pmd/macfwd.c create mode 100644 app/test-pmd/parameters.c create mode 100644 app/test-pmd/rxonly.c create mode 100644 app/test-pmd/testpmd.c create mode 100644 app/test-pmd/testpmd.h create mode 100644 app/test-pmd/txonly.c create mode 100644 app/test/Makefile create mode 100755 app/test/autotest.py create mode 100644 app/test/commands.c create mode 100755 app/test/graph_mempool.py create mode 100755 app/test/graph_ring.py create mode 100644 app/test/process.h create mode 100644 app/test/test.c create mode 100644 app/test/test.h create mode 100644 app/test/test_alarm.c create mode 100644 app/test/test_atomic.c create mode 100644 app/test/test_byteorder.c create mode 100644 app/test/test_cpuflags.c create mode 100644 app/test/test_cycles.c create mode 100644 app/test/test_debug.c create mode 100644 app/test/test_eal_flags.c create mode 100644 app/test/test_errno.c create mode 100644 app/test/test_hash.c create mode 100644 app/test/test_interrupts.c create mode 100644 app/test/test_logs.c create mode 100644 app/test/test_lpm.c create mode 100644 app/test/test_lpm_routes.h create mode 100644 app/test/test_malloc.c create mode 100644 app/test/test_mbuf.c create mode 100644 app/test/test_memcpy.c create mode 100644 app/test/test_memory.c create mode 100644 app/test/test_mempool.c create mode 100644 app/test/test_memzone.c 
create mode 100644 app/test/test_mp_secondary.c create mode 100644 app/test/test_pci.c create mode 100644 app/test/test_per_lcore.c create mode 100644 app/test/test_prefetch.c create mode 100644 app/test/test_ring.c create mode 100644 app/test/test_rwlock.c create mode 100644 app/test/test_spinlock.c create mode 100644 app/test/test_string_fns.c create mode 100644 app/test/test_tailq.c create mode 100644 app/test/test_timer.c create mode 100644 app/test/test_version.c create mode 100644 config/defconfig_i686-default-linuxapp-gcc create mode 100644 config/defconfig_i686-default-linuxapp-icc create mode 100644 config/defconfig_x86_64-default-linuxapp-gcc create mode 100644 config/defconfig_x86_64-default-linuxapp-icc create mode 100644 examples/cmdline/482246_CmdLine_Sample_App_Guide_Rev1.1.pdf create mode 100644 examples/cmdline/Makefile create mode 100644 examples/cmdline/commands.c create mode 100644 examples/cmdline/commands.h create mode 100644 examples/cmdline/main.c create mode 100644 examples/cmdline/main.h create mode 100644 examples/cmdline/parse_obj_list.c create mode 100644 examples/cmdline/parse_obj_list.h create mode 100644 examples/dpdk_qat/497691_QuickAssist_in_DPDK_Env_Sample_App_Guide_Rev1.0.pdf create mode 100644 examples/dpdk_qat/Makefile create mode 100644 examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev0.conf create mode 100644 examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev1.conf create mode 100644 examples/dpdk_qat/config_files/stargo_B0/dh89xxcc_qa_dev0.conf create mode 100644 examples/dpdk_qat/crypto.c create mode 100644 examples/dpdk_qat/crypto.h create mode 100644 examples/dpdk_qat/main.c create mode 100644 examples/dpdk_qat/main.h create mode 100644 examples/exception_path/482248_ExceptionPath_Sample_App_Guide_Rev1.1.pdf create mode 100644 examples/exception_path/Makefile create mode 100644 examples/exception_path/main.c create mode 100644 examples/helloworld/482249_HelloWorld_Sample_App_Guide_Rev1.1.pdf create mode 100644 examples/helloworld/Makefile create mode 100644 examples/helloworld/main.c create mode 100644 examples/helloworld/main.h create mode 100644 examples/ipv4_frag/490761_IPv4_Frag_Sample_App_Guide_Rev1.0.pdf create mode 100644 examples/ipv4_frag/Makefile create mode 100644 examples/ipv4_frag/main.c create mode 100644 examples/ipv4_frag/main.h create mode 100644 examples/ipv4_frag/rte_ipv4_frag.h create mode 100644 examples/ipv4_multicast/496632_IPv4_Multicast_Sample_App_Guide_Rev1.0.pdf create mode 100644 examples/ipv4_multicast/Makefile create mode 100644 examples/ipv4_multicast/main.c create mode 100644 examples/ipv4_multicast/main.h create mode 100644 examples/l2fwd-vf/496039_L2Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf create mode 100644 examples/l2fwd-vf/Makefile create mode 100644 examples/l2fwd-vf/main.c create mode 100644 examples/l2fwd-vf/main.h create mode 100644 examples/l2fwd/482250_L2Forwarding_Sample_App_Guide_Rev1.1.pdf create mode 100644 examples/l2fwd/Makefile create mode 100644 examples/l2fwd/main.c create mode 100644 examples/l2fwd/main.h create mode 100644 examples/l3fwd-vf/496040_L3Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf create mode 100644 examples/l3fwd-vf/Makefile create mode 100644 examples/l3fwd-vf/main.c create mode 100644 examples/l3fwd-vf/main.h create mode 100644 examples/l3fwd/482251_L3Forwarding_Sample_App_Guide_Rev1.2.pdf create mode 100644 examples/l3fwd/Makefile create mode 100644 examples/l3fwd/main.c create mode 100644 examples/l3fwd/main.h create mode 100644 
examples/link_status_interrupt/495672_Link_Status_Interrupt_Sample_App_Guide_Rev1.0.pdf create mode 100644 examples/link_status_interrupt/Makefile create mode 100644 examples/link_status_interrupt/main.c create mode 100644 examples/link_status_interrupt/main.h create mode 100644 examples/load_balancer/482252_LoadBalancer_Sample_App_Guide_Rev1.1.pdf create mode 100644 examples/load_balancer/Makefile create mode 100644 examples/load_balancer/config.c create mode 100644 examples/load_balancer/init.c create mode 100644 examples/load_balancer/main.c create mode 100644 examples/load_balancer/main.h create mode 100644 examples/load_balancer/runtime.c create mode 100644 examples/multi_process/482253_Multi_Process_Sample_App_Guide_Rev1.3.pdf create mode 100644 examples/multi_process/Makefile create mode 100644 examples/multi_process/client_server_mp/Makefile create mode 100644 examples/multi_process/client_server_mp/mp_client/Makefile create mode 100644 examples/multi_process/client_server_mp/mp_client/client.c create mode 100644 examples/multi_process/client_server_mp/mp_server/Makefile create mode 100644 examples/multi_process/client_server_mp/mp_server/args.c create mode 100644 examples/multi_process/client_server_mp/mp_server/args.h create mode 100644 examples/multi_process/client_server_mp/mp_server/init.c create mode 100644 examples/multi_process/client_server_mp/mp_server/init.h create mode 100644 examples/multi_process/client_server_mp/mp_server/main.c create mode 100644 examples/multi_process/client_server_mp/mp_server/main.h create mode 100644 examples/multi_process/client_server_mp/shared/common.h create mode 100644 examples/multi_process/client_server_mp/shared/init_drivers.h create mode 100644 examples/multi_process/simple_mp/Makefile create mode 100644 examples/multi_process/simple_mp/main.c create mode 100644 examples/multi_process/simple_mp/mp_commands.c create mode 100644 examples/multi_process/simple_mp/mp_commands.h create mode 100644 examples/multi_process/symmetric_mp/Makefile create mode 100644 examples/multi_process/symmetric_mp/main.c create mode 100644 examples/timer/482254_Timer_Sample_App_Guide_Rev1.1.pdf create mode 100644 examples/timer/Makefile create mode 100644 examples/timer/main.c create mode 100644 examples/timer/main.h create mode 100644 examples/vmdq_dcb/482255_VMDQ_DCB_L2Fwd_Sample_App_Guide_Rev1.1.pdf create mode 100644 examples/vmdq_dcb/Makefile create mode 100644 examples/vmdq_dcb/main.c create mode 100644 examples/vmdq_dcb/main.h create mode 100644 lib/Makefile create mode 100644 lib/librte_cmdline/Makefile create mode 100644 lib/librte_cmdline/cmdline.c create mode 100644 lib/librte_cmdline/cmdline.h create mode 100644 lib/librte_cmdline/cmdline_cirbuf.c create mode 100644 lib/librte_cmdline/cmdline_cirbuf.h create mode 100644 lib/librte_cmdline/cmdline_parse.c create mode 100644 lib/librte_cmdline/cmdline_parse.h create mode 100644 lib/librte_cmdline/cmdline_parse_etheraddr.c create mode 100644 lib/librte_cmdline/cmdline_parse_etheraddr.h create mode 100644 lib/librte_cmdline/cmdline_parse_ipaddr.c create mode 100644 lib/librte_cmdline/cmdline_parse_ipaddr.h create mode 100644 lib/librte_cmdline/cmdline_parse_num.c create mode 100644 lib/librte_cmdline/cmdline_parse_num.h create mode 100644 lib/librte_cmdline/cmdline_parse_portlist.c create mode 100644 lib/librte_cmdline/cmdline_parse_portlist.h create mode 100644 lib/librte_cmdline/cmdline_parse_string.c create mode 100644 lib/librte_cmdline/cmdline_parse_string.h create mode 100644 
lib/librte_cmdline/cmdline_rdline.c create mode 100644 lib/librte_cmdline/cmdline_rdline.h create mode 100644 lib/librte_cmdline/cmdline_socket.c create mode 100644 lib/librte_cmdline/cmdline_socket.h create mode 100644 lib/librte_cmdline/cmdline_vt100.c create mode 100644 lib/librte_cmdline/cmdline_vt100.h create mode 100644 lib/librte_eal/Makefile create mode 100644 lib/librte_eal/common/Makefile create mode 100644 lib/librte_eal/common/eal_common_cpuflags.c create mode 100644 lib/librte_eal/common/eal_common_errno.c create mode 100644 lib/librte_eal/common/eal_common_launch.c create mode 100644 lib/librte_eal/common/eal_common_log.c create mode 100644 lib/librte_eal/common/eal_common_memory.c create mode 100644 lib/librte_eal/common/eal_common_memzone.c create mode 100644 lib/librte_eal/common/eal_common_pci.c create mode 100644 lib/librte_eal/common/eal_common_tailqs.c create mode 100644 lib/librte_eal/common/include/eal_private.h create mode 100644 lib/librte_eal/common/include/i686/arch/rte_atomic.h create mode 100644 lib/librte_eal/common/include/rte_alarm.h create mode 100644 lib/librte_eal/common/include/rte_atomic.h create mode 100644 lib/librte_eal/common/include/rte_branch_prediction.h create mode 100644 lib/librte_eal/common/include/rte_byteorder.h create mode 100644 lib/librte_eal/common/include/rte_common.h create mode 100644 lib/librte_eal/common/include/rte_cpuflags.h create mode 100644 lib/librte_eal/common/include/rte_cycles.h create mode 100644 lib/librte_eal/common/include/rte_debug.h create mode 100644 lib/librte_eal/common/include/rte_eal.h create mode 100644 lib/librte_eal/common/include/rte_errno.h create mode 100644 lib/librte_eal/common/include/rte_interrupts.h create mode 100644 lib/librte_eal/common/include/rte_launch.h create mode 100644 lib/librte_eal/common/include/rte_lcore.h create mode 100644 lib/librte_eal/common/include/rte_log.h create mode 100644 lib/librte_eal/common/include/rte_memcpy.h create mode 100644 lib/librte_eal/common/include/rte_memory.h create mode 100644 lib/librte_eal/common/include/rte_memzone.h create mode 100644 lib/librte_eal/common/include/rte_pci.h create mode 100644 lib/librte_eal/common/include/rte_pci_dev_ids.h create mode 100644 lib/librte_eal/common/include/rte_per_lcore.h create mode 100644 lib/librte_eal/common/include/rte_prefetch.h create mode 100644 lib/librte_eal/common/include/rte_random.h create mode 100644 lib/librte_eal/common/include/rte_rwlock.h create mode 100644 lib/librte_eal/common/include/rte_spinlock.h create mode 100644 lib/librte_eal/common/include/rte_string_fns.h create mode 100644 lib/librte_eal/common/include/rte_tailq.h create mode 100644 lib/librte_eal/common/include/rte_version.h create mode 100644 lib/librte_eal/common/include/rte_warnings.h create mode 100644 lib/librte_eal/common/include/x86_64/arch/rte_atomic.h create mode 100644 lib/librte_eal/linuxapp/Makefile create mode 100644 lib/librte_eal/linuxapp/eal/Makefile create mode 100644 lib/librte_eal/linuxapp/eal/eal.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_alarm.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_debug.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_hpet.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_hugepage_info.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_interrupts.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_lcore.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_log.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_memory.c create mode 100644 
lib/librte_eal/linuxapp/eal/eal_pci.c create mode 100644 lib/librte_eal/linuxapp/eal/eal_thread.c create mode 100644 lib/librte_eal/linuxapp/eal/include/eal_fs_paths.h create mode 100644 lib/librte_eal/linuxapp/eal/include/eal_hugepages.h create mode 100644 lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h create mode 100644 lib/librte_eal/linuxapp/eal/include/eal_thread.h create mode 100644 lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h create mode 100644 lib/librte_eal/linuxapp/eal/include/exec-env/rte_lcore.h create mode 100644 lib/librte_eal/linuxapp/eal/include/exec-env/rte_per_lcore.h create mode 100644 lib/librte_eal/linuxapp/igb_uio/Makefile create mode 100644 lib/librte_eal/linuxapp/igb_uio/igb_uio.c create mode 100644 lib/librte_ether/Makefile create mode 100644 lib/librte_ether/rte_ethdev.c create mode 100644 lib/librte_ether/rte_ethdev.h create mode 100644 lib/librte_ether/rte_ether.h create mode 100644 lib/librte_hash/Makefile create mode 100644 lib/librte_hash/rte_fbk_hash.c create mode 100644 lib/librte_hash/rte_fbk_hash.h create mode 100644 lib/librte_hash/rte_hash.c create mode 100644 lib/librte_hash/rte_hash.h create mode 100644 lib/librte_hash/rte_hash_crc.h create mode 100644 lib/librte_hash/rte_jhash.h create mode 100644 lib/librte_lpm/Makefile create mode 100644 lib/librte_lpm/rte_lpm.c create mode 100644 lib/librte_lpm/rte_lpm.h create mode 100644 lib/librte_malloc/Makefile create mode 100644 lib/librte_malloc/malloc_elem.c create mode 100644 lib/librte_malloc/malloc_elem.h create mode 100644 lib/librte_malloc/malloc_heap.c create mode 100644 lib/librte_malloc/malloc_heap.h create mode 100644 lib/librte_malloc/rte_malloc.c create mode 100644 lib/librte_malloc/rte_malloc.h create mode 100644 lib/librte_mbuf/Makefile create mode 100644 lib/librte_mbuf/rte_mbuf.c create mode 100644 lib/librte_mbuf/rte_mbuf.h create mode 100644 lib/librte_mempool/Makefile create mode 100644 lib/librte_mempool/rte_mempool.c create mode 100644 lib/librte_mempool/rte_mempool.h create mode 100644 lib/librte_net/Makefile create mode 100644 lib/librte_net/rte_ip.h create mode 100644 lib/librte_net/rte_sctp.h create mode 100644 lib/librte_net/rte_tcp.h create mode 100644 lib/librte_net/rte_udp.h create mode 100644 lib/librte_pmd_igb/Makefile create mode 100644 lib/librte_pmd_igb/e1000_ethdev.c create mode 100644 lib/librte_pmd_igb/e1000_ethdev.h create mode 100644 lib/librte_pmd_igb/e1000_logs.h create mode 100644 lib/librte_pmd_igb/e1000_rxtx.c create mode 100644 lib/librte_pmd_igb/igb/README create mode 100644 lib/librte_pmd_igb/igb/e1000_82575.c create mode 100644 lib/librte_pmd_igb/igb/e1000_82575.h create mode 100644 lib/librte_pmd_igb/igb/e1000_api.c create mode 100644 lib/librte_pmd_igb/igb/e1000_api.h create mode 100644 lib/librte_pmd_igb/igb/e1000_defines.h create mode 100644 lib/librte_pmd_igb/igb/e1000_hw.h create mode 100644 lib/librte_pmd_igb/igb/e1000_mac.c create mode 100644 lib/librte_pmd_igb/igb/e1000_mac.h create mode 100644 lib/librte_pmd_igb/igb/e1000_manage.c create mode 100644 lib/librte_pmd_igb/igb/e1000_manage.h create mode 100644 lib/librte_pmd_igb/igb/e1000_mbx.c create mode 100644 lib/librte_pmd_igb/igb/e1000_mbx.h create mode 100644 lib/librte_pmd_igb/igb/e1000_nvm.c create mode 100644 lib/librte_pmd_igb/igb/e1000_nvm.h create mode 100644 lib/librte_pmd_igb/igb/e1000_osdep.c create mode 100644 lib/librte_pmd_igb/igb/e1000_osdep.h create mode 100644 lib/librte_pmd_igb/igb/e1000_phy.c create mode 100644 lib/librte_pmd_igb/igb/e1000_phy.h create mode 
100644 lib/librte_pmd_igb/igb/e1000_regs.h create mode 100644 lib/librte_pmd_igb/igb/e1000_vf.c create mode 100644 lib/librte_pmd_igb/igb/e1000_vf.h create mode 100644 lib/librte_pmd_igb/igb/if_igb.c create mode 100644 lib/librte_pmd_igb/igb/if_igb.h create mode 100644 lib/librte_pmd_ixgbe/Makefile create mode 100644 lib/librte_pmd_ixgbe/ixgbe/README create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixv.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe/ixv.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe_ethdev.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe_ethdev.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe_fdir.c create mode 100644 lib/librte_pmd_ixgbe/ixgbe_logs.h create mode 100644 lib/librte_pmd_ixgbe/ixgbe_rxtx.c create mode 100644 lib/librte_ring/Makefile create mode 100644 lib/librte_ring/rte_ring.c create mode 100644 lib/librte_ring/rte_ring.h create mode 100644 lib/librte_timer/Makefile create mode 100644 lib/librte_timer/rte_timer.c create mode 100644 lib/librte_timer/rte_timer.h create mode 100644 mk/arch/i686/rte.vars.mk create mode 100644 mk/arch/x86_64/rte.vars.mk create mode 100644 mk/exec-env/linuxapp/rte.app.mk create mode 100644 mk/exec-env/linuxapp/rte.vars.mk create mode 100644 mk/internal/rte.build-post.mk create mode 100644 mk/internal/rte.build-pre.mk create mode 100644 mk/internal/rte.clean-post.mk create mode 100644 mk/internal/rte.clean-pre.mk create mode 100644 mk/internal/rte.compile-post.mk create mode 100644 mk/internal/rte.compile-pre.mk create mode 100644 mk/internal/rte.depdirs-post.mk create mode 100644 mk/internal/rte.depdirs-pre.mk create mode 100644 mk/internal/rte.exthelp-post.mk create mode 100644 mk/internal/rte.install-post.mk create mode 100644 mk/internal/rte.install-pre.mk create mode 100644 mk/machine/atm/rte.vars.mk create mode 100644 mk/machine/default/rte.vars.mk create mode 100644 mk/machine/ivb/rte.vars.mk create mode 100644 mk/machine/native/rte.vars.mk create mode 100644 mk/machine/nhm/rte.vars.mk create mode 100644 mk/machine/snb/rte.vars.mk create mode 100644 mk/machine/wsm/rte.vars.mk create mode 100644 mk/rte.app.mk create mode 100644 mk/rte.doc.mk create mode 100644 mk/rte.extapp.mk create mode 100644 mk/rte.extlib.mk create mode 100644 mk/rte.extobj.mk create mode 100644 mk/rte.extvars.mk create mode 100644 mk/rte.gnuconfigure.mk create mode 100644 mk/rte.hostapp.mk create mode 100644 mk/rte.hostlib.mk create mode 100644 mk/rte.install.mk create mode 100644 mk/rte.lib.mk create mode 100644 mk/rte.module.mk 
create mode 100644 mk/rte.obj.mk create mode 100644 mk/rte.sdkbuild.mk create mode 100644 mk/rte.sdkconfig.mk create mode 100644 mk/rte.sdkdepdirs.mk create mode 100644 mk/rte.sdkdoc.mk create mode 100644 mk/rte.sdkgcov.mk create mode 100644 mk/rte.sdkinstall.mk create mode 100644 mk/rte.sdkroot.mk create mode 100644 mk/rte.sdktest.mk create mode 100644 mk/rte.sdktestall.mk create mode 100644 mk/rte.subdir.mk create mode 100644 mk/rte.vars.mk create mode 100644 mk/target/generic/rte.app.mk create mode 100644 mk/target/generic/rte.vars.mk create mode 100644 mk/toolchain/gcc/rte.toolchain-compat.mk create mode 100644 mk/toolchain/gcc/rte.vars.mk create mode 100644 mk/toolchain/icc/rte.toolchain-compat.mk create mode 100644 mk/toolchain/icc/rte.vars.mk create mode 100644 scripts/Makefile create mode 100755 scripts/depdirs-rule.sh create mode 100755 scripts/gen-build-mk.sh create mode 100755 scripts/gen-config-h.sh create mode 100755 scripts/import_autotest.sh create mode 100755 scripts/relpath.sh create mode 100755 scripts/test-framework.sh create mode 100644 scripts/testhost/Makefile create mode 100644 scripts/testhost/testhost.c create mode 100755 tools/setup.sh diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..347f7e1cbf --- /dev/null +++ b/Makefile @@ -0,0 +1,47 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# Head Makefile for compiling rte SDK +# + +RTE_SDK := $(CURDIR) +export RTE_SDK + +# +# directory list +# + +ROOTDIRS-y := scripts lib app + +include $(RTE_SDK)/mk/rte.sdkroot.mk diff --git a/app/Makefile b/app/Makefile new file mode 100644 index 0000000000..7a206e3de0 --- /dev/null +++ b/app/Makefile @@ -0,0 +1,42 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +DIRS-$(CONFIG_RTE_APP_TEST) += test +DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd +DIRS-$(CONFIG_RTE_APP_CHKINCS) += chkincs + +DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += dump_cfg + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/app/chkincs/Makefile b/app/chkincs/Makefile new file mode 100644 index 0000000000..1ec3337d5f --- /dev/null +++ b/app/chkincs/Makefile @@ -0,0 +1,96 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +APP = chkincs + +# +# all source are stored in SRCS-y +# + +SRCS-$(CONFIG_RTE_APP_CHKINCS) += test.c \ + test_alarm.c \ + test_atomic.c \ + test_branch_prediction.c \ + test_byteorder.c \ + test_common.c \ + test_cpuflags.c \ + test_cycles.c \ + test_debug.c \ + test_eal.c \ + test_errno.c \ + test_ethdev.c \ + test_ether.c \ + test_fbk_hash.c \ + test_hash_crc.c \ + test_hash.c \ + test_interrupts.c \ + test_ip.c \ + test_jhash.c \ + test_launch.c \ + test_lcore.c \ + test_log.c \ + test_lpm.c \ + test_malloc.c \ + test_mbuf.c \ + test_memcpy.c \ + test_memory.c \ + test_mempool.c \ + test_memzone.c \ + test_pci_dev_ids.c \ + test_pci.c \ + test_per_lcore.c \ + test_prefetch.c \ + test_random.c \ + test_ring.c \ + test_rwlock.c \ + test_sctp.c \ + test_spinlock.c \ + test_string_fns.c \ + test_tailq.c \ + test_tcp.c \ + test_timer.c \ + test_udp.c \ + test_version.c + +CFLAGS += -O0 -fno-inline +CFLAGS += $(WERROR_FLAGS) + +# this application needs libraries first +DEPDIRS-$(CONFIG_RTE_APP_CHKINCS) += lib + +include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/chkincs/test.c b/app/chkincs/test.c new file mode 100644 index 0000000000..cd44fc25d7 --- /dev/null +++ b/app/chkincs/test.c @@ -0,0 +1,50 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +main(__attribute__((unused)) int argc, __attribute__((unused)) char **argv) +{ + return 0; +} diff --git a/app/chkincs/test.h b/app/chkincs/test.h new file mode 100644 index 0000000000..4d6ec6e4c4 --- /dev/null +++ b/app/chkincs/test.h @@ -0,0 +1,90 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _TEST_H_ +#define _TEST_H_ + +/* icc on baremetal gives us troubles with function named 'main' */ +#ifdef RTE_EXEC_ENV_BAREMETAL +#define main _main +#endif + +int main(int argc, char **argv); + +int test_alarm(void); +int test_atomic(void); +int test_branch_prediction(void); +int test_byteorder(void); +int test_common(void); +int test_cpuflags(void); +int test_cycles(void); +int test_debug(void); +int test_eal(void); +int test_errno(void); +int test_ethdev(void); +int test_ether(void); +int test_fbk_hash(void); +int test_hash_crc(void); +int test_hash(void); +int test_interrupts(void); +int test_ip(void); +int test_jhash(void); +int test_launch(void); +int test_lcore(void); +int test_log(void); +int test_lpm(void); +int test_malloc(void); +int test_mbuf(void); +int test_memcpy(void); +int test_memory(void); +int test_mempool(void); +int test_memzone(void); +int test_pci_dev_ids(void); +int test_pci(void); +int test_per_lcore(void); +int test_prefetch(void); +int test_random(void); +int test_ring(void); +int test_rwlock(void); +int test_sctp(void); +int test_spinlock(void); +int test_string_fns(void); +int test_tailq(void); +int test_tcp(void); +int test_timer(void); +int test_udp(void); +int test_version(void); + +#endif diff --git a/app/chkincs/test_alarm.c b/app/chkincs/test_alarm.c new file mode 100644 index 0000000000..233d4f69dd --- /dev/null +++ b/app/chkincs/test_alarm.c @@ -0,0 +1,53 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include <rte_alarm.h> + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_alarm(void) +{ + rte_eal_alarm_set(10, 0, 0); + return 1; +} diff --git a/app/chkincs/test_atomic.c b/app/chkincs/test_atomic.c new file mode 100644 index 0000000000..f4906394fd --- /dev/null +++ b/app/chkincs/test_atomic.c @@ -0,0 +1,93 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include <rte_atomic.h> + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API.
Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_atomic(void) +{ + rte_atomic16_t a16 = RTE_ATOMIC16_INIT(1); + rte_atomic32_t a32 = RTE_ATOMIC32_INIT(1); + rte_atomic64_t a64 = RTE_ATOMIC64_INIT(1); + int x; + + rte_mb(); + rte_wmb(); + rte_rmb(); + + rte_atomic16_init(&a16); + rte_atomic16_set(&a16, 1); + x = rte_atomic16_read(&a16); + rte_atomic16_inc(&a16); + rte_atomic16_dec(&a16); + rte_atomic16_add(&a16, 5); + rte_atomic16_sub(&a16, 5); + x = rte_atomic16_test_and_set(&a16); + x = rte_atomic16_add_return(&a16, 10); + + rte_atomic32_init(&a32); + rte_atomic32_set(&a32, 1); + x = rte_atomic32_read(&a32); + rte_atomic32_inc(&a32); + rte_atomic32_dec(&a32); + rte_atomic32_add(&a32, 5); + rte_atomic32_sub(&a32, 5); + x = rte_atomic32_test_and_set(&a32); + x = rte_atomic32_add_return(&a32, 10); + + rte_atomic64_init(&a64); + rte_atomic64_set(&a64, 1); + x = rte_atomic64_read(&a64); + rte_atomic64_inc(&a64); + rte_atomic64_dec(&a64); + rte_atomic64_add(&a64, 5); + rte_atomic64_sub(&a64, 5); + x = rte_atomic64_test_and_set(&a64); + x = rte_atomic64_add_return(&a64, 10); + (void)x; + + return 1; +} + diff --git a/app/chkincs/test_branch_prediction.c b/app/chkincs/test_branch_prediction.c new file mode 100644 index 0000000000..219ddf16c6 --- /dev/null +++ b/app/chkincs/test_branch_prediction.c @@ -0,0 +1,58 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include <rte_branch_prediction.h> + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files.
+ * /_______\ + */ + +int test_branch_prediction(void) +{ + int a = 1; + int b = 2; + + if (likely(a < b)) + return 0; + else if (unlikely(a < b)) + return 1; + else return 2; +} diff --git a/app/chkincs/test_byteorder.c b/app/chkincs/test_byteorder.c new file mode 100644 index 0000000000..91b0d6ebf6 --- /dev/null +++ b/app/chkincs/test_byteorder.c @@ -0,0 +1,84 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +static volatile uint16_t u16 = 0x1337; +static volatile uint32_t u32 = 0xdeadbeefUL; +static volatile uint64_t u64 = 0xdeadcafebabefaceULL; + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_byteorder(void) +{ + uint16_t res_u16; + uint32_t res_u32; + uint64_t res_u64; + + res_u16 = rte_bswap16(u16); + res_u32 = rte_bswap32(u32); + res_u64 = rte_bswap64(u64); + + res_u16 = rte_cpu_to_le_16(u16); + res_u32 = rte_cpu_to_le_32(u32); + res_u64 = rte_cpu_to_le_64(u64); + + res_u16 = rte_cpu_to_be_16(u16); + res_u32 = rte_cpu_to_be_32(u32); + res_u64 = rte_cpu_to_be_64(u64); + + res_u16 = rte_le_to_cpu_16(u16); + res_u32 = rte_le_to_cpu_32(u32); + res_u64 = rte_le_to_cpu_64(u64); + + res_u16 = rte_be_to_cpu_16(u16); + res_u32 = rte_be_to_cpu_32(u32); + res_u64 = rte_be_to_cpu_64(u64); + + (void)res_u16; + (void)res_u32; + (void)res_u64; + + return 1; +} diff --git a/app/chkincs/test_common.c b/app/chkincs/test_common.c new file mode 100644 index 0000000000..3f86d5c516 --- /dev/null +++ b/app/chkincs/test_common.c @@ -0,0 +1,76 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +static int +test_func(__rte_unused int var1, int var2) +{ + RTE_SET_USED(var2); + return 1; +} + +static int static_var1 = 3; +static int static_var2 = 6; + +int +test_common(void) +{ + int *ptr1 = &static_var1, *ptr2 = &static_var2; + int var; + + ptr2 = RTE_PTR_ADD(ptr1, 10); + ptr2 = RTE_PTR_SUB(ptr1, 5); + var = RTE_PTR_DIFF(ptr1, ptr2); + + var = RTE_ALIGN(var, 16); + + RTE_BUILD_BUG_ON(0); + + var = RTE_MIN(10, 5); + var = RTE_MAX(10, 5); + + return test_func(10, 5); +} diff --git a/app/chkincs/test_cpuflags.c b/app/chkincs/test_cpuflags.c new file mode 100644 index 0000000000..017bb6629b --- /dev/null +++ b/app/chkincs/test_cpuflags.c @@ -0,0 +1,53 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_cpuflags(void) +{ + rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3); + return 1; +} diff --git a/app/chkincs/test_cycles.c b/app/chkincs/test_cycles.c new file mode 100644 index 0000000000..c85a35ae31 --- /dev/null +++ b/app/chkincs/test_cycles.c @@ -0,0 +1,63 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_cycles(void) +{ + uint64_t hz, c; + + hz = rte_get_hpet_hz(); + c = rte_get_hpet_cycles(); + rte_delay_us(10); + rte_delay_ms(10); + c = rte_rdtsc(); + + (void)hz; + (void)c; + + return 1; +} diff --git a/app/chkincs/test_debug.c b/app/chkincs/test_debug.c new file mode 100644 index 0000000000..58ecdad877 --- /dev/null +++ b/app/chkincs/test_debug.c @@ -0,0 +1,55 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_debug(void) +{ + rte_dump_stack(); + rte_dump_registers(); + rte_panic("oops %d", 10); + return 1; +} diff --git a/app/chkincs/test_eal.c b/app/chkincs/test_eal.c new file mode 100644 index 0000000000..2b77e62e37 --- /dev/null +++ b/app/chkincs/test_eal.c @@ -0,0 +1,52 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_eal(void) +{ + return 1; +} diff --git a/app/chkincs/test_errno.c b/app/chkincs/test_errno.c new file mode 100644 index 0000000000..d02ec94a76 --- /dev/null +++ b/app/chkincs/test_errno.c @@ -0,0 +1,54 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_errno(void) +{ + if (rte_errno != 0) + return -1; + return 1; +} diff --git a/app/chkincs/test_ethdev.c b/app/chkincs/test_ethdev.c new file mode 100644 index 0000000000..180a796b51 --- /dev/null +++ b/app/chkincs/test_ethdev.c @@ -0,0 +1,72 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +static struct rte_eth_conf port_conf; +static struct rte_eth_rxconf rx_conf; +static struct rte_eth_txconf tx_conf; +static struct rte_mempool *mp; + +int +test_ethdev(void) +{ + struct rte_eth_link link; + int x; + struct ether_addr ea; + + x = rte_eth_dev_count(); + x = rte_eth_dev_configure(0, 1, 1, &port_conf); + rte_eth_macaddr_get(0, &ea); + x = rte_eth_rx_queue_setup(0, 0, 128, 0, &rx_conf, mp); + x = rte_eth_tx_queue_setup(0, 0, 128, 0, &tx_conf); + rte_eth_link_get(0, &link); + x = rte_eth_dev_start(0); + + (void)x; + + return 1; +} diff --git a/app/chkincs/test_ether.c b/app/chkincs/test_ether.c new file mode 100644 index 0000000000..b089aaf8b6 --- /dev/null +++ b/app/chkincs/test_ether.c @@ -0,0 +1,52 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_ether(void) +{ + return 1; +} diff --git a/app/chkincs/test_fbk_hash.c b/app/chkincs/test_fbk_hash.c new file mode 100644 index 0000000000..e1e62a010f --- /dev/null +++ b/app/chkincs/test_fbk_hash.c @@ -0,0 +1,53 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_fbk_hash(void) +{ + void * ptr = (void *)RTE_FBK_HASH_FUNC_DEFAULT; + return ptr == ptr; +} diff --git a/app/chkincs/test_hash.c b/app/chkincs/test_hash.c new file mode 100644 index 0000000000..c989070d60 --- /dev/null +++ b/app/chkincs/test_hash.c @@ -0,0 +1,85 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +/* Parameters used for hash table in unit test functions. */ +static struct rte_hash_parameters ut_params = { + "name", /* name */ + 64, /* entries */ + 4, /* bucket_entries */ + 8, /* key_len */ + 0, /* hash_func */ + 0, /* hash_func_init_val */ + 0, /* socket_id */ +}; + +struct key { + char key[8]; +}; + +/* Keys used by unit test functions */ +static struct key keys[1] = { + { + { 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, }, + } +}; + +int test_hash(void) +{ + struct rte_hash *handle; + int32_t pos0; + + handle = rte_hash_create(&ut_params); + if (handle == 0) { + return -1; + } + pos0 = rte_hash_add_key(handle, &keys[0]); + pos0 = rte_hash_lookup(handle, &keys[0]); + pos0 = rte_hash_del_key(handle, &keys[0]); + rte_hash_free(handle); + (void)pos0; /* remove compiler warning */ + return 0; +} diff --git a/app/chkincs/test_hash_crc.c b/app/chkincs/test_hash_crc.c new file mode 100644 index 0000000000..f996a093d7 --- /dev/null +++ b/app/chkincs/test_hash_crc.c @@ -0,0 +1,52 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_hash_crc(void) +{ + return 1; +} diff --git a/app/chkincs/test_interrupts.c b/app/chkincs/test_interrupts.c new file mode 100644 index 0000000000..9d5516045d --- /dev/null +++ b/app/chkincs/test_interrupts.c @@ -0,0 +1,53 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. 
+ * /_______\ + */ + +int +test_interrupts(void) +{ + rte_intr_callback_register(0, 0, 0); + return 1; +} diff --git a/app/chkincs/test_ip.c b/app/chkincs/test_ip.c new file mode 100644 index 0000000000..4da405c565 --- /dev/null +++ b/app/chkincs/test_ip.c @@ -0,0 +1,53 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_ip(void) +{ + uint64_t var = IPv4(1,1,1,1); + return IS_IPV4_MCAST(var); +} diff --git a/app/chkincs/test_jhash.c b/app/chkincs/test_jhash.c new file mode 100644 index 0000000000..f63a68de7a --- /dev/null +++ b/app/chkincs/test_jhash.c @@ -0,0 +1,54 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_jhash(void) +{ + uint32_t a = 1, b = 2, c = 3; + __rte_jhash_mix(a,b,c); + return 1; +} diff --git a/app/chkincs/test_launch.c b/app/chkincs/test_launch.c new file mode 100644 index 0000000000..6395147c62 --- /dev/null +++ b/app/chkincs/test_launch.c @@ -0,0 +1,68 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. 
+ * /_______\ + */ + +static int +test_launch_per_core(__attribute__((unused)) void *arg) +{ + return 0; +} + +int +test_launch(void) +{ + enum rte_lcore_state_t s; + + rte_eal_remote_launch(test_launch_per_core, (void *)0, 0); + rte_eal_wait_lcore(0); + rte_eal_mp_remote_launch(test_launch_per_core, (void *)0, CALL_MASTER); + rte_eal_mp_wait_lcore(); + s = rte_eal_get_lcore_state(0); + + (void)s; + + return 0; +} diff --git a/app/chkincs/test_lcore.c b/app/chkincs/test_lcore.c new file mode 100644 index 0000000000..221b122ae3 --- /dev/null +++ b/app/chkincs/test_lcore.c @@ -0,0 +1,66 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_lcore(void) +{ + unsigned x; + + x = rte_socket_id(); + x = rte_lcore_id(); + x = rte_lcore_to_socket_id(x); + x = rte_lcore_count(); + x = rte_lcore_is_enabled(x); + + RTE_LCORE_FOREACH(x) + (void)x; + + RTE_LCORE_FOREACH_SLAVE(x) + (void)x; + + return 0; +} diff --git a/app/chkincs/test_log.c b/app/chkincs/test_log.c new file mode 100644 index 0000000000..c640966b75 --- /dev/null +++ b/app/chkincs/test_log.c @@ -0,0 +1,58 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +#define RTE_LOGTYPE_TESTAPP1 RTE_LOGTYPE_USER1 + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_log(void) +{ + rte_set_log_type(RTE_LOGTYPE_TESTAPP1, 1); + rte_set_log_level(RTE_LOG_DEBUG); + RTE_LOG(DEBUG, TESTAPP1, "this is a debug level message %d\n", 1); + rte_log_dump_history(); + return 0; +} diff --git a/app/chkincs/test_lpm.c b/app/chkincs/test_lpm.c new file mode 100644 index 0000000000..989676ecb8 --- /dev/null +++ b/app/chkincs/test_lpm.c @@ -0,0 +1,64 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_lpm(void) +{ + struct rte_lpm *lpm = 0; + uint32_t ip = 0; + uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0; + + lpm = rte_lpm_create(__func__, -1, 256, RTE_LPM_HEAP); + if (lpm == 0) + return -1; + rte_lpm_add(lpm, ip, depth, next_hop_add); + rte_lpm_lookup(lpm, ip, &next_hop_return); + rte_lpm_delete(lpm, ip, depth); + rte_lpm_lookup(lpm, ip, &next_hop_return); + rte_lpm_free(lpm); + return 0; +} diff --git a/app/chkincs/test_malloc.c b/app/chkincs/test_malloc.c new file mode 100644 index 0000000000..885b356664 --- /dev/null +++ b/app/chkincs/test_malloc.c @@ -0,0 +1,57 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_malloc(void) +{ + void *p1; + + p1 = rte_malloc("dummy", 1000, 8); + rte_free(p1); + + return 0; +} diff --git a/app/chkincs/test_mbuf.c b/app/chkincs/test_mbuf.c new file mode 100644 index 0000000000..1d3ff9cc3c --- /dev/null +++ b/app/chkincs/test_mbuf.c @@ -0,0 +1,110 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + + +#include + +#include "test.h" + +#define MBUF_SIZE 2048 +#define NB_MBUF 128 + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_mbuf(void) +{ + struct rte_mempool *mbuf_pool; + struct rte_mbuf *m, *m1; + char *hdr; + int x; + int* ptr; + + mbuf_pool = rte_mempool_create("test_mbuf_pool", NB_MBUF, + MBUF_SIZE, 32, 0, + (void (*)(struct rte_mempool*, void*)) 0, (void *)0, + rte_pktmbuf_init, (void *)0, + SOCKET_ID_ANY, 0); + if (mbuf_pool == NULL) { + return -1; + } + + m = rte_pktmbuf_alloc(mbuf_pool); + if(m == NULL) { + return -1; + } + + m1 = RTE_MBUF_FROM_BADDR(RTE_MBUF_TO_BADDR(m)); + (void)m1; + + x = rte_pktmbuf_pkt_len(m); + x = rte_pktmbuf_data_len(m); + x = rte_pktmbuf_headroom(m); + x = rte_pktmbuf_tailroom(m); + x = rte_pktmbuf_is_contiguous(m); + + m = rte_pktmbuf_lastseg(m); + + hdr = rte_pktmbuf_mtod(m, char *); + rte_pktmbuf_dump(m, 0); + + hdr = rte_pktmbuf_append(m, 10); + x = rte_pktmbuf_trim(m, 10); + hdr = rte_pktmbuf_prepend(m, 10); + hdr = rte_pktmbuf_adj(m, 10); + + ptr = (int*) rte_ctrlmbuf_data(m); + *ptr = rte_ctrlmbuf_len(m); + *ptr = rte_pktmbuf_pkt_len(m); + *ptr = rte_pktmbuf_data_len(m); + + rte_pktmbuf_free_seg(m); + rte_pktmbuf_free(m); + + RTE_MBUF_PREFETCH_TO_FREE(m); + + rte_mbuf_sanity_check(m, RTE_MBUF_CTRL, 1); + + (void)x; + (void)hdr; + + return 0; +} diff --git a/app/chkincs/test_memcpy.c b/app/chkincs/test_memcpy.c new file mode 100644 index 0000000000..19db8d2bdf --- /dev/null +++ b/app/chkincs/test_memcpy.c @@ -0,0 +1,58 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_memcpy(void) +{ + char buf[16]; + const char s[] = "hello\n"; + volatile int a = 10; + + rte_memcpy(buf, s, sizeof(s)); + rte_memcpy(buf, s, a); + return 0; +} diff --git a/app/chkincs/test_memory.c b/app/chkincs/test_memory.c new file mode 100644 index 0000000000..c17db89281 --- /dev/null +++ b/app/chkincs/test_memory.c @@ -0,0 +1,65 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. 
+ * /_______\ + */ + +static int a __rte_cache_aligned; + +int +test_memory(void) +{ + const struct rte_memseg *mem; + int s = CACHE_LINE_ROUNDUP(10); + + rte_dump_physmem_layout(); + s = rte_eal_get_physmem_size(); + mem = rte_eal_get_physmem_layout(); + + (void)a; + (void)s; + (void)mem; + + return 0; +} diff --git a/app/chkincs/test_mempool.c b/app/chkincs/test_mempool.c new file mode 100644 index 0000000000..9c669d6d57 --- /dev/null +++ b/app/chkincs/test_mempool.c @@ -0,0 +1,111 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +#define MAX_BULK 16 +#define MEMPOOL_ELT_SIZE 2048 +#define MEMPOOL_SIZE 2047 + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. 
+ * /_______\ + */ + +int +test_mempool(void) +{ + struct rte_mempool *mp; + void *ptrs[MAX_BULK]; + int x; + phys_addr_t addr; + + mp = rte_mempool_create("test_nocache", MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, 0, 0, + (void (*)(struct rte_mempool*, void*)) 0, + (void *)0, + (void (*)(struct rte_mempool*, void*, void*, unsigned int)) 0, + (void *)0, + SOCKET_ID_ANY, 0); + + if (mp == NULL) { + return -1; + } + + rte_mempool_set_bulk_count(mp, MAX_BULK); + rte_mempool_dump(mp); + + rte_mempool_mc_get_bulk(mp, ptrs, 1); + rte_mempool_mc_get_bulk(mp, ptrs, MAX_BULK); + rte_mempool_sc_get_bulk(mp, ptrs, 1); + rte_mempool_sc_get_bulk(mp, ptrs, MAX_BULK); + rte_mempool_get_bulk(mp, ptrs, 1); + rte_mempool_get_bulk(mp, ptrs, MAX_BULK); + rte_mempool_mc_get(mp, ptrs); + rte_mempool_sc_get(mp, ptrs); + rte_mempool_get(mp, ptrs); + + rte_mempool_mp_put_bulk(mp, ptrs, 1); + rte_mempool_mp_put_bulk(mp, ptrs, MAX_BULK); + rte_mempool_sp_put_bulk(mp, ptrs, 1); + rte_mempool_sp_put_bulk(mp, ptrs, MAX_BULK); + rte_mempool_put_bulk(mp, ptrs, 1); + rte_mempool_put_bulk(mp, ptrs, MAX_BULK); + rte_mempool_mp_put(mp, ptrs); + rte_mempool_sp_put(mp, ptrs); + rte_mempool_put(mp, ptrs); + + __MEMPOOL_STAT_ADD(mp, put, 1); + __mempool_check_cookies(mp, 0, 0, 0); + + x = rte_mempool_count(mp); + x = rte_mempool_free_count(mp); + x = rte_mempool_full(mp); + x = rte_mempool_empty(mp); + + addr = rte_mempool_virt2phy(mp, ptrs[0]); + rte_mempool_audit(mp); + ptrs[0] = rte_mempool_get_priv(mp); + + (void)x; + (void)addr; + + return 0; +} diff --git a/app/chkincs/test_memzone.c b/app/chkincs/test_memzone.c new file mode 100644 index 0000000000..31c9af695c --- /dev/null +++ b/app/chkincs/test_memzone.c @@ -0,0 +1,61 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. 
Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_memzone(void) +{ + const struct rte_memzone *memzone1; + + memzone1 = rte_memzone_lookup("testzone1"); + memzone1 = rte_memzone_reserve("testzone1", 100, + 0, 0); + rte_memzone_dump(); + + (void)memzone1; + + return 0; +} diff --git a/app/chkincs/test_pci.c b/app/chkincs/test_pci.c new file mode 100644 index 0000000000..7af0894bf7 --- /dev/null +++ b/app/chkincs/test_pci.c @@ -0,0 +1,86 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +static int my_driver_init(struct rte_pci_driver *dr, + struct rte_pci_device *dev); + +struct rte_pci_id my_driver_id[] = { + { + 0x8086, + 0x10E8, + PCI_ANY_ID, + PCI_ANY_ID, + }, + { + 0, 0, 0, 0 /* sentinel */ + }, +}; +struct rte_pci_driver my_driver = { + {0, 0}, + "test_driver", + my_driver_init, + my_driver_id, + RTE_PCI_DRV_NEED_IGB_UIO, +}; + +static int +my_driver_init(__attribute__((unused)) struct rte_pci_driver *dr, + __attribute__((unused)) struct rte_pci_device *dev) +{ + return 0; +} + +int +test_pci(void) +{ + struct rte_pci_id id = {RTE_PCI_DEVICE(0, 0)}; + rte_eal_pci_dump(); + rte_eal_pci_register(&my_driver); + rte_eal_pci_probe(); + (void)id; + return 0; +} diff --git a/app/chkincs/test_pci_dev_ids.c b/app/chkincs/test_pci_dev_ids.c new file mode 100644 index 0000000000..290105ce61 --- /dev/null +++ b/app/chkincs/test_pci_dev_ids.c @@ -0,0 +1,60 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include "test.h" + +struct A { + int x; + int y; +}; + +static struct A a[] = { +#define RTE_PCI_DEV_ID_DECL(vend, dev) {vend, dev}, +#include +}; + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_pci_dev_ids(void) +{ + return a[0].x; +} diff --git a/app/chkincs/test_per_lcore.c b/app/chkincs/test_per_lcore.c new file mode 100644 index 0000000000..d2fc666415 --- /dev/null +++ b/app/chkincs/test_per_lcore.c @@ -0,0 +1,57 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +static RTE_DEFINE_PER_LCORE(unsigned, test) = 0x12345678; + +int +test_per_lcore(void) +{ + if (RTE_PER_LCORE(test) != 0x12345678) + return -1; + + return 0; +} diff --git a/app/chkincs/test_prefetch.c b/app/chkincs/test_prefetch.c new file mode 100644 index 0000000000..df81f0e1d8 --- /dev/null +++ b/app/chkincs/test_prefetch.c @@ -0,0 +1,58 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_prefetch(void) +{ + int a; + + rte_prefetch0(&a); + rte_prefetch1(&a); + rte_prefetch2(&a); + + return 0; +} diff --git a/app/chkincs/test_random.c b/app/chkincs/test_random.c new file mode 100644 index 0000000000..9e10176774 --- /dev/null +++ b/app/chkincs/test_random.c @@ -0,0 +1,54 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_random(void) +{ + rte_srand(1); + rte_rand(); + return 0; +} diff --git a/app/chkincs/test_ring.c b/app/chkincs/test_ring.c new file mode 100644 index 0000000000..5e37a6a32b --- /dev/null +++ b/app/chkincs/test_ring.c @@ -0,0 +1,97 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +#define MAX_BULK 16 +#define RING_SIZE 4096 + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_ring(void) +{ + struct rte_ring *r; + void *ptrs[MAX_BULK]; + int x; + + r = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0); + if (r == 0) { + return -1; + } + rte_ring_dump(r); + + rte_ring_set_bulk_count(r, MAX_BULK); + rte_ring_set_water_mark(r, 50); + + rte_ring_sp_enqueue_bulk(r, &ptrs[0], 1); + rte_ring_mp_enqueue_bulk(r, &ptrs[0], 1); + rte_ring_sp_enqueue_bulk(r, &ptrs[0], MAX_BULK); + rte_ring_mp_enqueue_bulk(r, &ptrs[0], MAX_BULK); + rte_ring_enqueue_bulk(r, &ptrs[0], MAX_BULK); + rte_ring_enqueue_bulk(r, &ptrs[0], MAX_BULK); + rte_ring_sp_enqueue(r, &ptrs[0]); + rte_ring_mp_enqueue(r, &ptrs[0]); + rte_ring_enqueue(r, &ptrs[0]); + + rte_ring_sc_dequeue_bulk(r, &ptrs[0], 1); + rte_ring_sc_dequeue_bulk(r, &ptrs[0], MAX_BULK); + rte_ring_mc_dequeue_bulk(r, &ptrs[0], 1); + rte_ring_mc_dequeue_bulk(r, &ptrs[0], MAX_BULK); + rte_ring_dequeue_bulk(r, &ptrs[0], 1); + rte_ring_dequeue_bulk(r, &ptrs[0], MAX_BULK); + rte_ring_sc_dequeue(r, &ptrs[0]); + rte_ring_mc_dequeue(r, &ptrs[0]); + rte_ring_dequeue(r, &ptrs[0]); + + __RING_STAT_ADD(r, enq_fail, 10); + + x = rte_ring_full(r); + x = rte_ring_empty(r); + x = rte_ring_count(r); + x = rte_ring_free_count(r); + + (void)x; + + return 0; +} diff --git a/app/chkincs/test_rwlock.c b/app/chkincs/test_rwlock.c new file mode 100644 index 0000000000..20ab519f03 --- /dev/null +++ b/app/chkincs/test_rwlock.c @@ -0,0 +1,60 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_rwlock(void) +{ + rte_rwlock_t rwl = RTE_RWLOCK_INITIALIZER; + + rte_rwlock_init(&rwl); + rte_rwlock_write_lock(&rwl); + rte_rwlock_write_unlock(&rwl); + rte_rwlock_read_lock(&rwl); + rte_rwlock_read_unlock(&rwl); + + return 0; +} diff --git a/app/chkincs/test_sctp.c b/app/chkincs/test_sctp.c new file mode 100644 index 0000000000..11b6b78002 --- /dev/null +++ b/app/chkincs/test_sctp.c @@ -0,0 +1,52 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_sctp(void) +{ + return 0; +} diff --git a/app/chkincs/test_spinlock.c b/app/chkincs/test_spinlock.c new file mode 100644 index 0000000000..eb538df81b --- /dev/null +++ b/app/chkincs/test_spinlock.c @@ -0,0 +1,59 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +static rte_spinlock_t sl = RTE_SPINLOCK_INITIALIZER; +static rte_spinlock_recursive_t slr = RTE_SPINLOCK_RECURSIVE_INITIALIZER; + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_spinlock(void) +{ + rte_spinlock_init(&sl); + rte_spinlock_lock(&sl); + rte_spinlock_unlock(&sl); + rte_spinlock_recursive_lock(&slr); + return 0; +} diff --git a/app/chkincs/test_string_fns.c b/app/chkincs/test_string_fns.c new file mode 100644 index 0000000000..09a24de77a --- /dev/null +++ b/app/chkincs/test_string_fns.c @@ -0,0 +1,52 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_string_fns(void) +{ + return 0; +} diff --git a/app/chkincs/test_tailq.c b/app/chkincs/test_tailq.c new file mode 100644 index 0000000000..4730e1c292 --- /dev/null +++ b/app/chkincs/test_tailq.c @@ -0,0 +1,55 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_tailq(void) +{ + struct rte_dummy *t1, *t2; + t1 = RTE_TAILQ_RESERVE("dummy", rte_dummy); + t2 = RTE_TAILQ_LOOKUP("dummy", rte_dummy); + return (t1 == t2) ? 0 : -1; +} diff --git a/app/chkincs/test_tcp.c b/app/chkincs/test_tcp.c new file mode 100644 index 0000000000..96e54a60d7 --- /dev/null +++ b/app/chkincs/test_tcp.c @@ -0,0 +1,52 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_tcp(void) +{ + return 0; +} diff --git a/app/chkincs/test_timer.c b/app/chkincs/test_timer.c new file mode 100644 index 0000000000..ea63b42e52 --- /dev/null +++ b/app/chkincs/test_timer.c @@ -0,0 +1,74 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +/* timer callback for basic tests */ +static void +timer_cb(__attribute__((unused)) struct rte_timer *tim, + __attribute__((unused)) void *arg) +{ + return; +} + +int +test_timer(void) +{ + int x; + struct rte_timer tim = RTE_TIMER_INITIALIZER; + + rte_timer_subsystem_init(); + rte_timer_init(&tim); + rte_timer_reset(&tim, 1234, SINGLE, 0, timer_cb, &x); + rte_timer_stop(&tim); + rte_timer_reset_sync(&tim, 1234, SINGLE, 0, timer_cb, &x); + rte_timer_stop_sync(&tim); + x = rte_timer_pending(&tim); + rte_timer_manage(); + rte_timer_dump_stats(); + + return 0; +} diff --git a/app/chkincs/test_udp.c b/app/chkincs/test_udp.c new file mode 100644 index 0000000000..9ccb5bab47 --- /dev/null +++ b/app/chkincs/test_udp.c @@ -0,0 +1,52 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include "test.h" + +#include + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. 
+ * /_______\ + */ + +int +test_udp(void) +{ + return 0; +} diff --git a/app/chkincs/test_version.c b/app/chkincs/test_version.c new file mode 100644 index 0000000000..e518a67526 --- /dev/null +++ b/app/chkincs/test_version.c @@ -0,0 +1,52 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include "test.h" + +/* + * ^ + * / \ + * / | \ WARNING: this test program does *not* show how to use the + * / . \ API. Its only goal is to check dependencies of include files. + * /_______\ + */ + +int +test_version(void) +{ + return 1; +} diff --git a/app/dump_cfg/Makefile b/app/dump_cfg/Makefile new file mode 100644 index 0000000000..916166abfc --- /dev/null +++ b/app/dump_cfg/Makefile @@ -0,0 +1,49 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 +# + +include $(RTE_SDK)/mk/rte.vars.mk + +APP = dump_cfg + +CFLAGS += $(WERROR_FLAGS) + +# +# all source are stored in SRCS-y +# +SRCS-y := dump_cfg_main.c + +# this application needs libraries first +DEPDIRS-y += lib + +include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/dump_cfg/dump_cfg_main.c b/app/dump_cfg/dump_cfg_main.c new file mode 100644 index 0000000000..9227f35ada --- /dev/null +++ b/app/dump_cfg/dump_cfg_main.c @@ -0,0 +1,229 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + + +/* some functions used for printing out the memory segments and memory zones information */ + +#define PRINT_STR_FIELD(structname, field) do{ \ + count+= rte_snprintf(buf + count, len-count, " %s='%s',", \ + #field, (const char *)structname->field);\ +} while(0) + +#define PRINT_PTR_FIELD(structname, field) do{ \ + count+= rte_snprintf(buf + count, len-count, " %s=%p,", \ + #field, (void *)structname->field);\ +} while(0) + +#define PRINT_UINT_FIELD(structname, field) do{ \ + count+= rte_snprintf(buf + count, len-count, " %s=%llu,", \ + #field, (unsigned long long)structname->field);\ +} while(0) + +#define PRINT_INT_FIELD(structname, field) do{ \ + count+= rte_snprintf(buf + count, len-count, " %s=%lld,", \ + #field, (long long)structname->field);\ +} while(0) + +#define PRINT_CUSTOM_FIELD(structname, field, print_fn) do{ \ + char buf2[1024]; \ + count+= rte_snprintf(buf + count, len-count, " %s=%s,", \ + #field, print_fn(structname->field, buf2, sizeof(buf2)));\ +} while(0) + +static inline const char * +memseg_to_str(const struct rte_memseg *seg, char *buf, size_t len) +{ + int count = 0; + count += rte_snprintf(buf + count, len - count, "{"); + PRINT_UINT_FIELD(seg, phys_addr); + PRINT_PTR_FIELD(seg, addr); + PRINT_UINT_FIELD(seg, len); + PRINT_INT_FIELD(seg, socket_id); + PRINT_UINT_FIELD(seg, hugepage_sz); + PRINT_UINT_FIELD(seg, nchannel); + PRINT_UINT_FIELD(seg, nrank); + rte_snprintf(buf + count - 1, len - count + 1, " }"); + return buf; +} + +static inline const char * +memzone_to_str(const struct rte_memzone *zone, char *buf, size_t len) +{ + int count = 0; + count += rte_snprintf(buf + count, len - count, "{"); + PRINT_STR_FIELD(zone, name); + PRINT_UINT_FIELD(zone, phys_addr); + PRINT_PTR_FIELD(zone, addr); + PRINT_UINT_FIELD(zone, len); + PRINT_INT_FIELD(zone, socket_id); + PRINT_UINT_FIELD(zone, flags); + rte_snprintf(buf + count - 1, len - count + 1, " }"); + return buf; +} + +static inline const char * +tailq_to_str(const struct rte_tailq_head *tailq, char *buf, size_t len) +{ + int count = 0; + count += rte_snprintf(buf + count, len - count, "{"); + PRINT_STR_FIELD(tailq, qname); + const struct rte_dummy_head *head = &tailq->tailq_head; + PRINT_PTR_FIELD(head, tqh_first); + PRINT_PTR_FIELD(head, tqh_last); + rte_snprintf(buf + count - 1, len - count + 1, " }"); + return buf; +} + +#define PREFIX "prefix" +static const char *directory = "/var/run"; +static const char *pre = "rte"; + +static void +usage(const char *prgname) +{ + printf("%s --prefix \n\n" + "dump_config option list:\n" + "\t--"PREFIX": filename prefix\n", + prgname); +} + +static int +dmp_cfg_parse_args(int argc, char **argv) +{ + const char *prgname = argv[0]; + const char *home_dir = getenv("HOME"); + int opt; + int option_index; + static struct option lgopts[] = { + {PREFIX, 1, 0, 0}, + {0, 0, 0, 0} + }; + + if (getuid() != 0 && home_dir != NULL) + directory = home_dir; + + while ((opt = getopt_long(argc, argv, "", + lgopts, &option_index)) != EOF) { + switch (opt) { + case 0: + if (!strcmp(lgopts[option_index].name, PREFIX)) + pre = optarg; + else{ + usage(prgname); + return -1; + } + break; + + default: + usage(prgname); + return -1; + } + } + return 0; +} + +int +main(int argc, char **argv) +{ + char buffer[1024]; + char path[PATH_MAX]; + int i; + int fd = 0; + + dmp_cfg_parse_args(argc, 
argv); + rte_snprintf(path, sizeof(path), "%s/.%s_config", directory, pre); + printf("Path to mem_config: %s\n\n", path); + + fd = open(path, O_RDWR); + if (fd < 0){ + printf("Error with config open\n"); + return 1; + } + struct rte_mem_config *cfg = mmap(NULL, sizeof(*cfg), PROT_READ, \ + MAP_SHARED, fd, 0); + if (cfg == NULL){ + printf("Error with config mmap\n"); + close(fd); + return 1; + } + close(fd); + + printf("----------- MEMORY_SEGMENTS -------------\n"); + for (i = 0; i < RTE_MAX_MEMSEG; i++){ + if (cfg->memseg[i].addr == NULL) break; + printf("Segment %d: ", i); + printf("%s\n", memseg_to_str(&cfg->memseg[i], buffer, sizeof(buffer))); + } + printf("--------- END_MEMORY_SEGMENTS -----------\n"); + + printf("------------ MEMORY_ZONES ---------------\n"); + for (i = 0; i < RTE_MAX_MEMZONE; i++){ + if (cfg->memzone[i].addr == NULL) break; + printf("Zone %d: ", i); + printf("%s\n", memzone_to_str(&cfg->memzone[i], buffer, sizeof(buffer))); + + } + printf("---------- END_MEMORY_ZONES -------------\n"); + + printf("------------- TAIL_QUEUES ---------------\n"); + for (i = 0; i < RTE_MAX_TAILQ; i++){ + if (cfg->tailq_head[i].qname[0] == '\0') break; + printf("Tailq %d: ", i); + printf("%s\n", tailq_to_str(&cfg->tailq_head[i], buffer, sizeof(buffer))); + + } + printf("----------- END_TAIL_QUEUES -------------\n"); + + return 0; +} + diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile new file mode 100644 index 0000000000..bad337ae35 --- /dev/null +++ b/app/test-pmd/Makefile @@ -0,0 +1,63 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +APP = testpmd + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_TEST_PMD) := testpmd.c +SRCS-$(CONFIG_RTE_TEST_PMD) += parameters.c +SRCS-$(CONFIG_RTE_TEST_PMD) += cmdline.c +SRCS-$(CONFIG_RTE_TEST_PMD) += config.c +SRCS-$(CONFIG_RTE_TEST_PMD) += iofwd.c +SRCS-$(CONFIG_RTE_TEST_PMD) += macfwd.c +SRCS-$(CONFIG_RTE_TEST_PMD) += rxonly.c +SRCS-$(CONFIG_RTE_TEST_PMD) += txonly.c +SRCS-$(CONFIG_RTE_TEST_PMD) += csumonly.c +ifeq ($(CONFIG_RTE_LIBRTE_IEEE1588),y) +SRCS-$(CONFIG_RTE_TEST_PMD) += ieee1588fwd.c +endif + +# this application needs libraries first +DEPDIRS-$(CONFIG_RTE_TEST_PMD) += lib + +include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c new file mode 100644 index 0000000000..28233a6195 --- /dev/null +++ b/app/test-pmd/cmdline.c @@ -0,0 +1,2180 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef __linux__ +#include +#endif +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +/* *** HELP *** */ +struct cmd_help_result { + cmdline_fixed_string_t help; +}; + +static void cmd_help_parsed(__attribute__((unused)) void *parsed_result, + struct cmdline *cl, + __attribute__((unused)) void *data) +{ + cmdline_printf(cl, + "\n" + "TEST PMD\n" + "--------\n" + "\n" + "This commandline can be used to configure forwarding\n" + "\n"); + cmdline_printf(cl, + "Display informations:\n" + "---------------------\n" + "- show port info|stats|fdir X|all\n" + " Diplays information or stats on port X, or all\n" + "- clear port stats X|all\n" + " Clear stats for port X, or all\n" + "- show config rxtx|cores|fwd\n" + " Displays the given configuration\n" + "- read reg port_id reg_off\n" + " Displays value of a port register\n" + "- read regfield port_id reg_off bit_x bit_y\n" + " Displays value of a port register bit field\n" + "- read regbit port_id reg_off bit_x\n" + " Displays value of a port register bit\n" + "- read rxd port_id queue_id rxd_id\n" + " Displays a RX descriptor of a port RX queue\n" + "- read txd port_id queue_id txd_id\n" + " Displays a TX descriptor of a port TX queue\n" + "\n"); + cmdline_printf(cl, + "Configure:\n" + "----------\n" + "Modifications are taken into account once " + "forwarding is restarted.\n" + "- set default\n" + " Set forwarding to default configuration\n" + "- set nbport|nbcore|burst|verbose X\n" + " Set number of ports, number of cores, number " + "of packets per burst,\n or verbose level to X\n" + "- set txpkts x[,y]*\n" + " Set the length of each segment of TXONLY packets\n" + "- set coremask|portmask X\n" + " Set the hexadecimal mask of forwarding cores / " + "forwarding ports\n" + "- set corelist|portlist x[,y]*\n" + " Set the list of forwarding cores / forwarding " + "ports\n" + "- rx_vlan add/rm vlan_id|all port_id\n" + " Add/remove vlan_id, or all identifiers, to/from " + "the set of VLAN Identifiers\n filtered by port_id\n" + "- tx_vlan set vlan_id port_id\n" + " Enable hardware insertion of a VLAN header with " + "the Tag Identifier vlan_id\n in packets sent on" + "port_id\n" + "- tx_vlan reset port_id\n" + " Disable hardware insertion of a VLAN header in " + "packets sent on port_id\n" + "- tx_checksum set mask port_id\n" + " Enable hardware insertion of checksum offload with " + "the 4-bit mask (0~0xf)\n in packets sent on port_id\n" + " Please check the NIC datasheet for HW limits\n" + " bit 0 - insert ip checksum offload if set \n" + " bit 1 - insert udp checksum offload if set \n" + " bit 2 - insert tcp checksum offload if set\n" + " bit 3 - insert sctp checksum offload if set\n" +#ifdef RTE_LIBRTE_IEEE1588 + "- set fwd io|mac|rxonly|txonly|csum|ieee1588\n" + " Set IO, MAC, RXONLY, TXONLY, CSUM or IEEE1588 " + "packet forwarding mode\n" +#else + "- set fwd io|mac|rxonly|txonly|csum\n" + " Set IO, MAC, RXONLY, CSUM or TXONLY packet " + "forwarding mode\n" +#endif + "- mac_addr add|remove X \n" + " Add/Remove the MAC address on port X\n" + "- set promisc|allmulti [all|X] on|off\n" + " Set/unset promisc|allmulti mode on port X, or 
all\n" + "- set flow_ctrl rx on|off tx on|off high_water low_water " + "pause_time send_xon port_id \n" + " Set the link flow control parameter on the port \n" + "- write reg port_id reg_off value\n" + " Set value of a port register\n" + "- write regfield port_id reg_off bit_x bit_y value\n" + " Set bit field value of a port register\n" + "- write regbit port_id reg_off bit_x value\n" + " Set bit value of a port register\n" + "\n"); + cmdline_printf(cl, + "Control forwarding:\n" + "-------------------\n" + "- start\n" + " Start packet forwarding with current config\n" + "- start tx_first\n" + " Start packet forwarding with current config" + " after sending one burst\n of packets\n" + "- stop\n" + " Stop packet forwarding, and displays accumulated" + " stats\n" + "\n"); + cmdline_printf(cl, + "Flow director mode:\n" + "-------------------\n" + "- add_signature_filter port_id ip|udp|tcp|sctp src\n" + " ip_src_address port_src dst ip_dst_address port_dst\n" + " flexbytes flexbytes_values vlan vlan_id queue queue_id\n" + "- upd_signature_filter port_id ip|udp|tcp|sctp src \n" + " ip_src_address port_src dst ip_dst_address port_dst\n" + " flexbytes flexbytes_values vlan vlan_id queue queue_id\n" + "- rm_signature_filter port_id ip|udp|tcp|sctp src\n" + " ip_src_address port_src dst ip_dst_address port_dst\n" + " flexbytes flexbytes_values vlan vlan_id\n" + "- add_perfect_filter port_id ip|udp|tcp|sctp src\n" + " ip_src_address port_src dst ip_dst_address port_dst\n" + " flexbytes flexbytes_values vlan vlan_id queue \n" + " queue_id soft soft_id\n" + "- upd_perfect_filter port_id ip|udp|tcp|sctp src\n" + " ip_src_address port_src dst ip_dst_address port_dst\n" + " flexbytes flexbytes_values vlan vlan_id queue queue_id\n" + "- rm_perfect_filter port_id ip|udp|tcp|sctp src\n" + " ip_src_address port_src dst ip_dst_address port_dst\n" + " flexbytes flexbytes_values vlan vlan_id soft soft_id\n" + "- set_masks_filter port_id only_ip_flow 0|1 src_mask\n" + " ip_src_mask port_src_mask dst_mask ip_dst_mask\n" + " port_dst_mask flexbytes 0|1 vlan_id 0|1 vlan_prio 0|1\n" + "\n"); + cmdline_printf(cl, + "Misc:\n" + "-----\n" + "- quit\n" + " Quit to prompt in linux, and reboot on baremetal\n" + "\n"); +} + +cmdline_parse_token_string_t cmd_help_help = + TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help"); + +cmdline_parse_inst_t cmd_help = { + .f = cmd_help_parsed, + .data = NULL, + .help_str = "show help", + .tokens = { + (void *)&cmd_help_help, + NULL, + }, +}; + +/* *** stop *** */ +struct cmd_stop_result { + cmdline_fixed_string_t stop; +}; + +static void cmd_stop_parsed(__attribute__((unused)) void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + stop_packet_forwarding(); +} + +cmdline_parse_token_string_t cmd_stop_stop = + TOKEN_STRING_INITIALIZER(struct cmd_stop_result, stop, "stop"); + +cmdline_parse_inst_t cmd_stop = { + .f = cmd_stop_parsed, + .data = NULL, + .help_str = "stop - stop packet forwarding", + .tokens = { + (void *)&cmd_stop_stop, + NULL, + }, +}; + +/* *** SET CORELIST and PORTLIST CONFIGURATION *** */ + +static unsigned int +parse_item_list(char* str, const char* item_name, unsigned int max_items, + unsigned int *parsed_items, int check_unique_values) +{ + unsigned int nb_item; + unsigned int value; + unsigned int i; + unsigned int j; + int value_ok; + char c; + + /* + * First parse all items in the list and store their value. 
+ */ + value = 0; + nb_item = 0; + value_ok = 0; + for (i = 0; i < strnlen(str, STR_TOKEN_SIZE); i++) { + c = str[i]; + if ((c >= '0') && (c <= '9')) { + value = (unsigned int) (value * 10 + (c - '0')); + value_ok = 1; + continue; + } + if (c != ',') { + printf("character %c is not a decimal digit\n", c); + return (0); + } + if (! value_ok) { + printf("No valid value before comma\n"); + return (0); + } + if (nb_item < max_items) { + parsed_items[nb_item] = value; + value_ok = 0; + value = 0; + } + nb_item++; + } + if (nb_item >= max_items) { + printf("Number of %s = %u > %u (maximum items)\n", + item_name, nb_item + 1, max_items); + return (0); + } + parsed_items[nb_item++] = value; + if (! check_unique_values) + return (nb_item); + + /* + * Then, check that all values in the list are differents. + * No optimization here... + */ + for (i = 0; i < nb_item; i++) { + for (j = i + 1; j < nb_item; j++) { + if (parsed_items[j] == parsed_items[i]) { + printf("duplicated %s %u at index %u and %u\n", + item_name, parsed_items[i], i, j); + return (0); + } + } + } + return (nb_item); +} + +struct cmd_set_list_result { + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t list_name; + cmdline_fixed_string_t list_of_items; +}; + +static void cmd_set_list_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_list_result *res; + union { + unsigned int lcorelist[RTE_MAX_LCORE]; + unsigned int portlist[RTE_MAX_ETHPORTS]; + } parsed_items; + unsigned int nb_item; + + res = parsed_result; + if (!strcmp(res->list_name, "corelist")) { + nb_item = parse_item_list(res->list_of_items, "core", + RTE_MAX_LCORE, + parsed_items.lcorelist, 1); + if (nb_item > 0) + set_fwd_lcores_list(parsed_items.lcorelist, nb_item); + return; + } + if (!strcmp(res->list_name, "portlist")) { + nb_item = parse_item_list(res->list_of_items, "port", + RTE_MAX_ETHPORTS, + parsed_items.portlist, 1); + if (nb_item > 0) + set_fwd_ports_list(parsed_items.portlist, nb_item); + } +} + +cmdline_parse_token_string_t cmd_set_list_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, cmd_keyword, + "set"); +cmdline_parse_token_string_t cmd_set_list_name = + TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, list_name, + "corelist#portlist"); +cmdline_parse_token_string_t cmd_set_list_of_items = + TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, list_of_items, + NULL); + +cmdline_parse_inst_t cmd_set_fwd_list = { + .f = cmd_set_list_parsed, + .data = NULL, + .help_str = "set corelist|portlist x[,y]*", + .tokens = { + (void *)&cmd_set_list_keyword, + (void *)&cmd_set_list_name, + (void *)&cmd_set_list_of_items, + NULL, + }, +}; + +/* *** SET COREMASK and PORTMASK CONFIGURATION *** */ + +struct cmd_setmask_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t mask; + uint64_t hexavalue; +}; + +static void cmd_set_mask_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_setmask_result *res = parsed_result; + + if (!strcmp(res->mask, "coremask")) + set_fwd_lcores_mask(res->hexavalue); + else if (!strcmp(res->mask, "portmask")) + set_fwd_ports_mask(res->hexavalue); +} + +cmdline_parse_token_string_t cmd_setmask_set = + TOKEN_STRING_INITIALIZER(struct cmd_setmask_result, set, "set"); +cmdline_parse_token_string_t cmd_setmask_mask = + TOKEN_STRING_INITIALIZER(struct cmd_setmask_result, mask, + "coremask#portmask"); +cmdline_parse_token_num_t cmd_setmask_value = + 
TOKEN_NUM_INITIALIZER(struct cmd_setmask_result, hexavalue, UINT64); + +cmdline_parse_inst_t cmd_set_fwd_mask = { + .f = cmd_set_mask_parsed, + .data = NULL, + .help_str = "set coremask|portmask hexadecimal value", + .tokens = { + (void *)&cmd_setmask_set, + (void *)&cmd_setmask_mask, + (void *)&cmd_setmask_value, + NULL, + }, +}; + +/* + * SET NBPORT, NBCORE, PACKET BURST, and VERBOSE LEVEL CONFIGURATION + */ +struct cmd_set_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t what; + uint16_t value; +}; + +static void cmd_set_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_result *res = parsed_result; + if (!strcmp(res->what, "nbport")) + set_fwd_ports_number(res->value); + else if (!strcmp(res->what, "nbcore")) + set_fwd_lcores_number(res->value); + else if (!strcmp(res->what, "burst")) + set_nb_pkt_per_burst(res->value); + else if (!strcmp(res->what, "verbose")) + set_verbose_level(res->value); +} + +cmdline_parse_token_string_t cmd_set_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_result, set, "set"); +cmdline_parse_token_string_t cmd_set_what = + TOKEN_STRING_INITIALIZER(struct cmd_set_result, what, + "nbport#nbcore#burst#verbose"); +cmdline_parse_token_num_t cmd_set_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_result, value, UINT16); + +cmdline_parse_inst_t cmd_set_numbers = { + .f = cmd_set_parsed, + .data = NULL, + .help_str = "set nbport|nbcore|burst|verbose value", + .tokens = { + (void *)&cmd_set_set, + (void *)&cmd_set_what, + (void *)&cmd_set_value, + NULL, + }, +}; + +/* *** SET SEGMENT LENGTHS OF TXONLY PACKETS *** */ + +struct cmd_set_txpkts_result { + cmdline_fixed_string_t cmd_keyword; + cmdline_fixed_string_t txpkts; + cmdline_fixed_string_t seg_lengths; +}; + +static void +cmd_set_txpkts_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_txpkts_result *res; + unsigned seg_lengths[RTE_MAX_SEGS_PER_PKT]; + unsigned int nb_segs; + + res = parsed_result; + nb_segs = parse_item_list(res->seg_lengths, "segment lengths", + RTE_MAX_SEGS_PER_PKT, seg_lengths, 0); + if (nb_segs > 0) + set_tx_pkt_segments(seg_lengths, nb_segs); +} + +cmdline_parse_token_string_t cmd_set_txpkts_keyword = + TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result, + cmd_keyword, "set"); +cmdline_parse_token_string_t cmd_set_txpkts_name = + TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result, + txpkts, "txpkts"); +cmdline_parse_token_string_t cmd_set_txpkts_lengths = + TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result, + seg_lengths, NULL); + +cmdline_parse_inst_t cmd_set_txpkts = { + .f = cmd_set_txpkts_parsed, + .data = NULL, + .help_str = "set txpkts x[,y]*", + .tokens = { + (void *)&cmd_set_txpkts_keyword, + (void *)&cmd_set_txpkts_name, + (void *)&cmd_set_txpkts_lengths, + NULL, + }, +}; + +/* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */ +struct cmd_rx_vlan_filter_all_result { + cmdline_fixed_string_t rx_vlan; + cmdline_fixed_string_t what; + cmdline_fixed_string_t all; + uint8_t port_id; +}; + +static void +cmd_rx_vlan_filter_all_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_rx_vlan_filter_all_result *res = parsed_result; + + if (!strcmp(res->what, "add")) + rx_vlan_all_filter_set(res->port_id, 1); + else + rx_vlan_all_filter_set(res->port_id, 0); +} + +cmdline_parse_token_string_t 
cmd_rx_vlan_filter_all_rx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result, + rx_vlan, "rx_vlan"); +cmdline_parse_token_string_t cmd_rx_vlan_filter_all_what = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result, + what, "add#rm"); +cmdline_parse_token_string_t cmd_rx_vlan_filter_all_all = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result, + all, "all"); +cmdline_parse_token_num_t cmd_rx_vlan_filter_all_portid = + TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_all_result, + port_id, UINT8); + +cmdline_parse_inst_t cmd_rx_vlan_filter_all = { + .f = cmd_rx_vlan_filter_all_parsed, + .data = NULL, + .help_str = "add/remove all identifiers to/from the set of VLAN " + "Identifiers filtered by a port", + .tokens = { + (void *)&cmd_rx_vlan_filter_all_rx_vlan, + (void *)&cmd_rx_vlan_filter_all_what, + (void *)&cmd_rx_vlan_filter_all_all, + (void *)&cmd_rx_vlan_filter_all_portid, + NULL, + }, +}; + +/* *** ADD/REMOVE A VLAN IDENTIFIER TO/FROM A PORT VLAN RX FILTER *** */ +struct cmd_rx_vlan_filter_result { + cmdline_fixed_string_t rx_vlan; + cmdline_fixed_string_t what; + uint16_t vlan_id; + uint8_t port_id; +}; + +static void +cmd_rx_vlan_filter_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_rx_vlan_filter_result *res = parsed_result; + + if (!strcmp(res->what, "add")) + rx_vlan_filter_set(res->port_id, res->vlan_id, 1); + else + rx_vlan_filter_set(res->port_id, res->vlan_id, 0); +} + +cmdline_parse_token_string_t cmd_rx_vlan_filter_rx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_result, + rx_vlan, "rx_vlan"); +cmdline_parse_token_string_t cmd_rx_vlan_filter_what = + TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_result, + what, "add#rm"); +cmdline_parse_token_num_t cmd_rx_vlan_filter_vlanid = + TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_result, + vlan_id, UINT16); +cmdline_parse_token_num_t cmd_rx_vlan_filter_portid = + TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_result, + port_id, UINT8); + +cmdline_parse_inst_t cmd_rx_vlan_filter = { + .f = cmd_rx_vlan_filter_parsed, + .data = NULL, + .help_str = "add/remove a VLAN identifier to/from the set of VLAN " + "Identifiers filtered by a port", + .tokens = { + (void *)&cmd_rx_vlan_filter_rx_vlan, + (void *)&cmd_rx_vlan_filter_what, + (void *)&cmd_rx_vlan_filter_vlanid, + (void *)&cmd_rx_vlan_filter_portid, + NULL, + }, +}; + +/* *** ENABLE HARDWARE INSERTION OF VLAN HEADER IN TX PACKETS *** */ +struct cmd_tx_vlan_set_result { + cmdline_fixed_string_t tx_vlan; + cmdline_fixed_string_t set; + uint16_t vlan_id; + uint8_t port_id; +}; + +static void +cmd_tx_vlan_set_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_tx_vlan_set_result *res = parsed_result; + + tx_vlan_set(res->port_id, res->vlan_id); +} + +cmdline_parse_token_string_t cmd_tx_vlan_set_tx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_result, + tx_vlan, "tx_vlan"); +cmdline_parse_token_string_t cmd_tx_vlan_set_set = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_result, + set, "set"); +cmdline_parse_token_num_t cmd_tx_vlan_set_vlanid = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result, + vlan_id, UINT16); +cmdline_parse_token_num_t cmd_tx_vlan_set_portid = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result, + port_id, UINT8); + +cmdline_parse_inst_t cmd_tx_vlan_set = { + .f = cmd_tx_vlan_set_parsed, + .data = NULL, + .help_str = "enable 
hardware insertion of a VLAN header with a given " + "TAG Identifier in packets sent on a port", + .tokens = { + (void *)&cmd_tx_vlan_set_tx_vlan, + (void *)&cmd_tx_vlan_set_set, + (void *)&cmd_tx_vlan_set_vlanid, + (void *)&cmd_tx_vlan_set_portid, + NULL, + }, +}; + +/* *** DISABLE HARDWARE INSERTION OF VLAN HEADER IN TX PACKETS *** */ +struct cmd_tx_vlan_reset_result { + cmdline_fixed_string_t tx_vlan; + cmdline_fixed_string_t reset; + uint8_t port_id; +}; + +static void +cmd_tx_vlan_reset_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_tx_vlan_reset_result *res = parsed_result; + + tx_vlan_reset(res->port_id); +} + +cmdline_parse_token_string_t cmd_tx_vlan_reset_tx_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_reset_result, + tx_vlan, "tx_vlan"); +cmdline_parse_token_string_t cmd_tx_vlan_reset_reset = + TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_reset_result, + reset, "reset"); +cmdline_parse_token_num_t cmd_tx_vlan_reset_portid = + TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_reset_result, + port_id, UINT8); + +cmdline_parse_inst_t cmd_tx_vlan_reset = { + .f = cmd_tx_vlan_reset_parsed, + .data = NULL, + .help_str = "disable hardware insertion of a VLAN header in packets " + "sent on a port", + .tokens = { + (void *)&cmd_tx_vlan_reset_tx_vlan, + (void *)&cmd_tx_vlan_reset_reset, + (void *)&cmd_tx_vlan_reset_portid, + NULL, + }, +}; + + +/* *** ENABLE HARDWARE INSERTION OF CHECKSUM IN TX PACKETS *** */ +struct cmd_tx_cksum_set_result { + cmdline_fixed_string_t tx_cksum; + cmdline_fixed_string_t set; + uint8_t cksum_mask; + uint8_t port_id; +}; + +static void +cmd_tx_cksum_set_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_tx_cksum_set_result *res = parsed_result; + + tx_cksum_set(res->port_id, res->cksum_mask); +} + +cmdline_parse_token_string_t cmd_tx_cksum_set_tx_cksum = + TOKEN_STRING_INITIALIZER(struct cmd_tx_cksum_set_result, + tx_cksum, "tx_checksum"); +cmdline_parse_token_string_t cmd_tx_cksum_set_set = + TOKEN_STRING_INITIALIZER(struct cmd_tx_cksum_set_result, + set, "set"); +cmdline_parse_token_num_t cmd_tx_cksum_set_cksum_mask = + TOKEN_NUM_INITIALIZER(struct cmd_tx_cksum_set_result, + cksum_mask, UINT8); +cmdline_parse_token_num_t cmd_tx_cksum_set_portid = + TOKEN_NUM_INITIALIZER(struct cmd_tx_cksum_set_result, + port_id, UINT8); + +cmdline_parse_inst_t cmd_tx_cksum_set = { + .f = cmd_tx_cksum_set_parsed, + .data = NULL, + .help_str = "enable hardware insertion of L3/L4 checksum with a given " + "mask in packets sent on a port, the bit mapping is given as: Bit 0 for IP, " + "Bit 1 for UDP, Bit 2 for TCP, Bit 3 for SCTP", + .tokens = { + (void *)&cmd_tx_cksum_set_tx_cksum, + (void *)&cmd_tx_cksum_set_set, + (void *)&cmd_tx_cksum_set_cksum_mask, + (void *)&cmd_tx_cksum_set_portid, + NULL, + }, +}; + +/* *** SET FORWARDING MODE *** */ +struct cmd_set_fwd_mode_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t fwd; + cmdline_fixed_string_t mode; +}; + +static void cmd_set_fwd_mode_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_fwd_mode_result *res = parsed_result; + + set_pkt_forwarding_mode(res->mode); +} + +cmdline_parse_token_string_t cmd_setfwd_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, set, "set"); +cmdline_parse_token_string_t cmd_setfwd_fwd = + TOKEN_STRING_INITIALIZER(struct 
cmd_set_fwd_mode_result, fwd, "fwd"); +cmdline_parse_token_string_t cmd_setfwd_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, mode, +#ifdef RTE_LIBRTE_IEEE1588 + "io#mac#rxonly#txonly#csum#ieee1588"); +#else + "io#mac#rxonly#txonly#csum"); +#endif + +cmdline_parse_inst_t cmd_set_fwd_mode = { + .f = cmd_set_fwd_mode_parsed, + .data = NULL, +#ifdef RTE_LIBRTE_IEEE1588 + .help_str = "set fwd io|mac|rxonly|txonly|csum|ieee1588 - set IO, MAC," + " RXONLY, TXONLY, CSUM or IEEE1588 packet forwarding mode", +#else + .help_str = "set fwd io|mac|rxonly|txonly|csum - set IO, MAC," + " RXONLY, CSUM or TXONLY packet forwarding mode", +#endif + .tokens = { + (void *)&cmd_setfwd_set, + (void *)&cmd_setfwd_fwd, + (void *)&cmd_setfwd_mode, + NULL, + }, +}; + +/* *** SET PROMISC MODE *** */ +struct cmd_set_promisc_mode_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t promisc; + cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */ + uint8_t port_num; /* valid if "allports" argument == 0 */ + cmdline_fixed_string_t mode; +}; + +static void cmd_set_promisc_mode_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + void *allports) +{ + struct cmd_set_promisc_mode_result *res = parsed_result; + int enable; + portid_t i; + + if (!strcmp(res->mode, "on")) + enable = 1; + else + enable = 0; + + /* all ports */ + if (allports) { + for (i = 0; i < nb_ports; i++) { + if (enable) + rte_eth_promiscuous_enable(i); + else + rte_eth_promiscuous_disable(i); + } + } + else { + if (enable) + rte_eth_promiscuous_enable(res->port_num); + else + rte_eth_promiscuous_disable(res->port_num); + } +} + +cmdline_parse_token_string_t cmd_setpromisc_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, set, "set"); +cmdline_parse_token_string_t cmd_setpromisc_promisc = + TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, promisc, + "promisc"); +cmdline_parse_token_string_t cmd_setpromisc_portall = + TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, port_all, + "all"); +cmdline_parse_token_num_t cmd_setpromisc_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_set_promisc_mode_result, port_num, + UINT8); +cmdline_parse_token_string_t cmd_setpromisc_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, mode, + "on#off"); + +cmdline_parse_inst_t cmd_set_promisc_mode_all = { + .f = cmd_set_promisc_mode_parsed, + .data = (void *)1, + .help_str = "set promisc all on|off: set promisc mode for all ports", + .tokens = { + (void *)&cmd_setpromisc_set, + (void *)&cmd_setpromisc_promisc, + (void *)&cmd_setpromisc_portall, + (void *)&cmd_setpromisc_mode, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_promisc_mode_one = { + .f = cmd_set_promisc_mode_parsed, + .data = (void *)0, + .help_str = "set promisc X on|off: set promisc mode on port X", + .tokens = { + (void *)&cmd_setpromisc_set, + (void *)&cmd_setpromisc_promisc, + (void *)&cmd_setpromisc_portnum, + (void *)&cmd_setpromisc_mode, + NULL, + }, +}; + +/* *** SET ALLMULTI MODE *** */ +struct cmd_set_allmulti_mode_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t allmulti; + cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */ + uint8_t port_num; /* valid if "allports" argument == 0 */ + cmdline_fixed_string_t mode; +}; + +static void cmd_set_allmulti_mode_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + void *allports) +{ + struct cmd_set_allmulti_mode_result *res = parsed_result; + int enable; + portid_t i; + + if 
(!strcmp(res->mode, "on")) + enable = 1; + else + enable = 0; + + /* all ports */ + if (allports) { + for (i = 0; i < nb_ports; i++) { + if (enable) + rte_eth_allmulticast_enable(i); + else + rte_eth_allmulticast_disable(i); + } + } + else { + if (enable) + rte_eth_allmulticast_enable(res->port_num); + else + rte_eth_allmulticast_disable(res->port_num); + } +} + +cmdline_parse_token_string_t cmd_setallmulti_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, set, "set"); +cmdline_parse_token_string_t cmd_setallmulti_allmulti = + TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, allmulti, + "allmulti"); +cmdline_parse_token_string_t cmd_setallmulti_portall = + TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, port_all, + "all"); +cmdline_parse_token_num_t cmd_setallmulti_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_set_allmulti_mode_result, port_num, + UINT8); +cmdline_parse_token_string_t cmd_setallmulti_mode = + TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, mode, + "on#off"); + +cmdline_parse_inst_t cmd_set_allmulti_mode_all = { + .f = cmd_set_allmulti_mode_parsed, + .data = (void *)1, + .help_str = "set allmulti all on|off: set allmulti mode for all ports", + .tokens = { + (void *)&cmd_setallmulti_set, + (void *)&cmd_setallmulti_allmulti, + (void *)&cmd_setallmulti_portall, + (void *)&cmd_setallmulti_mode, + NULL, + }, +}; + +cmdline_parse_inst_t cmd_set_allmulti_mode_one = { + .f = cmd_set_allmulti_mode_parsed, + .data = (void *)0, + .help_str = "set allmulti X on|off: set allmulti mode on port X", + .tokens = { + (void *)&cmd_setallmulti_set, + (void *)&cmd_setallmulti_allmulti, + (void *)&cmd_setallmulti_portnum, + (void *)&cmd_setallmulti_mode, + NULL, + }, +}; + +/* *** ADD/REMOVE A PKT FILTER *** */ +struct cmd_pkt_filter_result { + cmdline_fixed_string_t pkt_filter; + uint8_t port_id; + cmdline_fixed_string_t protocol; + cmdline_fixed_string_t src; + cmdline_ipaddr_t ip_src; + uint16_t port_src; + cmdline_fixed_string_t dst; + cmdline_ipaddr_t ip_dst; + uint16_t port_dst; + cmdline_fixed_string_t flexbytes; + uint16_t flexbytes_value; + cmdline_fixed_string_t vlan; + uint16_t vlan_id; + cmdline_fixed_string_t queue; + int8_t queue_id; + cmdline_fixed_string_t soft; + uint8_t soft_id; +}; + +static void +cmd_pkt_filter_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct rte_fdir_filter fdir_filter; + struct cmd_pkt_filter_result *res = parsed_result; + + memset(&fdir_filter, 0, sizeof(struct rte_fdir_filter)); + + if (res->ip_src.family == AF_INET) + fdir_filter.ip_src.ipv4_addr = res->ip_src.addr.ipv4.s_addr; + else + memcpy(&(fdir_filter.ip_src.ipv6_addr), + &(res->ip_src.addr.ipv6), + sizeof(struct in6_addr)); + + if (res->ip_dst.family == AF_INET) + fdir_filter.ip_dst.ipv4_addr = res->ip_dst.addr.ipv4.s_addr; + else + memcpy(&(fdir_filter.ip_dst.ipv6_addr), + &(res->ip_dst.addr.ipv6), + sizeof(struct in6_addr)); + + fdir_filter.port_dst = rte_cpu_to_be_16(res->port_dst); + fdir_filter.port_src = rte_cpu_to_be_16(res->port_src); + + if (!strcmp(res->protocol, "udp")) + fdir_filter.l4type = RTE_FDIR_L4TYPE_UDP; + else if (!strcmp(res->protocol, "tcp")) + fdir_filter.l4type = RTE_FDIR_L4TYPE_TCP; + else if (!strcmp(res->protocol, "sctp")) + fdir_filter.l4type = RTE_FDIR_L4TYPE_SCTP; + else /* default only IP */ + fdir_filter.l4type = RTE_FDIR_L4TYPE_NONE; + + if (res->ip_dst.family == AF_INET6) + fdir_filter.iptype = RTE_FDIR_IPTYPE_IPV6; + else + 
fdir_filter.iptype = RTE_FDIR_IPTYPE_IPV4; + + fdir_filter.vlan_id = rte_cpu_to_be_16(res->vlan_id); + fdir_filter.flex_bytes = rte_cpu_to_be_16(res->flexbytes_value); + + if (!strcmp(res->pkt_filter, "add_signature_filter")) + fdir_add_signature_filter(res->port_id, res->queue_id, + &fdir_filter); + else if (!strcmp(res->pkt_filter, "upd_signature_filter")) + fdir_update_signature_filter(res->port_id, res->queue_id, + &fdir_filter); + else if (!strcmp(res->pkt_filter, "rm_signature_filter")) + fdir_remove_signature_filter(res->port_id, &fdir_filter); + else if (!strcmp(res->pkt_filter, "add_perfect_filter")) + fdir_add_perfect_filter(res->port_id, res->soft_id, + res->queue_id, + (uint8_t) (res->queue_id < 0), + &fdir_filter); + else if (!strcmp(res->pkt_filter, "upd_perfect_filter")) + fdir_update_perfect_filter(res->port_id, res->soft_id, + res->queue_id, + (uint8_t) (res->queue_id < 0), + &fdir_filter); + else if (!strcmp(res->pkt_filter, "rm_perfect_filter")) + fdir_remove_perfect_filter(res->port_id, res->soft_id, + &fdir_filter); + +} + + +cmdline_parse_token_num_t cmd_pkt_filter_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result, + port_id, UINT8); +cmdline_parse_token_string_t cmd_pkt_filter_protocol = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + protocol, "ip#tcp#udp#sctp"); +cmdline_parse_token_string_t cmd_pkt_filter_src = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + src, "src"); +cmdline_parse_token_ipaddr_t cmd_pkt_filter_ip_src = + TOKEN_IPADDR_INITIALIZER(struct cmd_pkt_filter_result, + ip_src); +cmdline_parse_token_num_t cmd_pkt_filter_port_src = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result, + port_src, UINT16); +cmdline_parse_token_string_t cmd_pkt_filter_dst = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + dst, "dst"); +cmdline_parse_token_ipaddr_t cmd_pkt_filter_ip_dst = + TOKEN_IPADDR_INITIALIZER(struct cmd_pkt_filter_result, + ip_dst); +cmdline_parse_token_num_t cmd_pkt_filter_port_dst = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result, + port_dst, UINT16); +cmdline_parse_token_string_t cmd_pkt_filter_flexbytes = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + flexbytes, "flexbytes"); +cmdline_parse_token_num_t cmd_pkt_filter_flexbytes_value = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result, + flexbytes_value, UINT16); +cmdline_parse_token_string_t cmd_pkt_filter_vlan = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + vlan, "vlan"); +cmdline_parse_token_num_t cmd_pkt_filter_vlan_id = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result, + vlan_id, UINT16); +cmdline_parse_token_string_t cmd_pkt_filter_queue = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + queue, "queue"); +cmdline_parse_token_num_t cmd_pkt_filter_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result, + queue_id, INT8); +cmdline_parse_token_string_t cmd_pkt_filter_soft = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + soft, "soft"); +cmdline_parse_token_num_t cmd_pkt_filter_soft_id = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result, + soft_id, UINT16); + + +cmdline_parse_token_string_t cmd_pkt_filter_add_signature_filter = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + pkt_filter, "add_signature_filter"); +cmdline_parse_inst_t cmd_add_signature_filter = { + .f = cmd_pkt_filter_parsed, + .data = NULL, + .help_str = "add a signature filter", + .tokens = { + (void *)&cmd_pkt_filter_add_signature_filter, + (void *)&cmd_pkt_filter_port_id, + (void 
*)&cmd_pkt_filter_protocol, + (void *)&cmd_pkt_filter_src, + (void *)&cmd_pkt_filter_ip_src, + (void *)&cmd_pkt_filter_port_src, + (void *)&cmd_pkt_filter_dst, + (void *)&cmd_pkt_filter_ip_dst, + (void *)&cmd_pkt_filter_port_dst, + (void *)&cmd_pkt_filter_flexbytes, + (void *)&cmd_pkt_filter_flexbytes_value, + (void *)&cmd_pkt_filter_vlan, + (void *)&cmd_pkt_filter_vlan_id, + (void *)&cmd_pkt_filter_queue, + (void *)&cmd_pkt_filter_queue_id, + NULL, + }, +}; + + +cmdline_parse_token_string_t cmd_pkt_filter_upd_signature_filter = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + pkt_filter, "upd_signature_filter"); +cmdline_parse_inst_t cmd_upd_signature_filter = { + .f = cmd_pkt_filter_parsed, + .data = NULL, + .help_str = "update a signature filter", + .tokens = { + (void *)&cmd_pkt_filter_upd_signature_filter, + (void *)&cmd_pkt_filter_port_id, + (void *)&cmd_pkt_filter_protocol, + (void *)&cmd_pkt_filter_src, + (void *)&cmd_pkt_filter_ip_src, + (void *)&cmd_pkt_filter_port_src, + (void *)&cmd_pkt_filter_dst, + (void *)&cmd_pkt_filter_ip_dst, + (void *)&cmd_pkt_filter_port_dst, + (void *)&cmd_pkt_filter_flexbytes, + (void *)&cmd_pkt_filter_flexbytes_value, + (void *)&cmd_pkt_filter_vlan, + (void *)&cmd_pkt_filter_vlan_id, + (void *)&cmd_pkt_filter_queue, + (void *)&cmd_pkt_filter_queue_id, + NULL, + }, +}; + + +cmdline_parse_token_string_t cmd_pkt_filter_rm_signature_filter = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + pkt_filter, "rm_signature_filter"); +cmdline_parse_inst_t cmd_rm_signature_filter = { + .f = cmd_pkt_filter_parsed, + .data = NULL, + .help_str = "remove a signature filter", + .tokens = { + (void *)&cmd_pkt_filter_rm_signature_filter, + (void *)&cmd_pkt_filter_port_id, + (void *)&cmd_pkt_filter_protocol, + (void *)&cmd_pkt_filter_src, + (void *)&cmd_pkt_filter_ip_src, + (void *)&cmd_pkt_filter_port_src, + (void *)&cmd_pkt_filter_dst, + (void *)&cmd_pkt_filter_ip_dst, + (void *)&cmd_pkt_filter_port_dst, + (void *)&cmd_pkt_filter_flexbytes, + (void *)&cmd_pkt_filter_flexbytes_value, + (void *)&cmd_pkt_filter_vlan, + (void *)&cmd_pkt_filter_vlan_id, + NULL + }, +}; + + +cmdline_parse_token_string_t cmd_pkt_filter_add_perfect_filter = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + pkt_filter, "add_perfect_filter"); +cmdline_parse_inst_t cmd_add_perfect_filter = { + .f = cmd_pkt_filter_parsed, + .data = NULL, + .help_str = "add a perfect filter", + .tokens = { + (void *)&cmd_pkt_filter_add_perfect_filter, + (void *)&cmd_pkt_filter_port_id, + (void *)&cmd_pkt_filter_protocol, + (void *)&cmd_pkt_filter_src, + (void *)&cmd_pkt_filter_ip_src, + (void *)&cmd_pkt_filter_port_src, + (void *)&cmd_pkt_filter_dst, + (void *)&cmd_pkt_filter_ip_dst, + (void *)&cmd_pkt_filter_port_dst, + (void *)&cmd_pkt_filter_flexbytes, + (void *)&cmd_pkt_filter_flexbytes_value, + (void *)&cmd_pkt_filter_vlan, + (void *)&cmd_pkt_filter_vlan_id, + (void *)&cmd_pkt_filter_queue, + (void *)&cmd_pkt_filter_queue_id, + (void *)&cmd_pkt_filter_soft, + (void *)&cmd_pkt_filter_soft_id, + NULL, + }, +}; + + +cmdline_parse_token_string_t cmd_pkt_filter_upd_perfect_filter = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + pkt_filter, "upd_perfect_filter"); +cmdline_parse_inst_t cmd_upd_perfect_filter = { + .f = cmd_pkt_filter_parsed, + .data = NULL, + .help_str = "update a perfect filter", + .tokens = { + (void *)&cmd_pkt_filter_upd_perfect_filter, + (void *)&cmd_pkt_filter_port_id, + (void *)&cmd_pkt_filter_protocol, + (void *)&cmd_pkt_filter_src, + (void 
*)&cmd_pkt_filter_ip_src, + (void *)&cmd_pkt_filter_port_src, + (void *)&cmd_pkt_filter_dst, + (void *)&cmd_pkt_filter_ip_dst, + (void *)&cmd_pkt_filter_port_dst, + (void *)&cmd_pkt_filter_flexbytes, + (void *)&cmd_pkt_filter_flexbytes_value, + (void *)&cmd_pkt_filter_vlan, + (void *)&cmd_pkt_filter_vlan_id, + (void *)&cmd_pkt_filter_queue, + (void *)&cmd_pkt_filter_queue_id, + (void *)&cmd_pkt_filter_soft, + (void *)&cmd_pkt_filter_soft_id, + NULL, + }, +}; + + +cmdline_parse_token_string_t cmd_pkt_filter_rm_perfect_filter = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result, + pkt_filter, "rm_perfect_filter"); +cmdline_parse_inst_t cmd_rm_perfect_filter = { + .f = cmd_pkt_filter_parsed, + .data = NULL, + .help_str = "remove a perfect filter", + .tokens = { + (void *)&cmd_pkt_filter_rm_perfect_filter, + (void *)&cmd_pkt_filter_port_id, + (void *)&cmd_pkt_filter_protocol, + (void *)&cmd_pkt_filter_src, + (void *)&cmd_pkt_filter_ip_src, + (void *)&cmd_pkt_filter_port_src, + (void *)&cmd_pkt_filter_dst, + (void *)&cmd_pkt_filter_ip_dst, + (void *)&cmd_pkt_filter_port_dst, + (void *)&cmd_pkt_filter_flexbytes, + (void *)&cmd_pkt_filter_flexbytes_value, + (void *)&cmd_pkt_filter_vlan, + (void *)&cmd_pkt_filter_vlan_id, + (void *)&cmd_pkt_filter_soft, + (void *)&cmd_pkt_filter_soft_id, + NULL, + }, +}; + +/* *** SETUP MASKS FILTER *** */ +struct cmd_pkt_filter_masks_result { + cmdline_fixed_string_t filter_mask; + uint8_t port_id; + cmdline_fixed_string_t src_mask; + uint32_t ip_src_mask; + uint16_t port_src_mask; + cmdline_fixed_string_t dst_mask; + uint32_t ip_dst_mask; + uint16_t port_dst_mask; + cmdline_fixed_string_t flexbytes; + uint8_t flexbytes_value; + cmdline_fixed_string_t vlan_id; + uint8_t vlan_id_value; + cmdline_fixed_string_t vlan_prio; + uint8_t vlan_prio_value; + cmdline_fixed_string_t only_ip_flow; + uint8_t only_ip_flow_value; +}; + +static void +cmd_pkt_filter_masks_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct rte_fdir_masks fdir_masks; + struct cmd_pkt_filter_masks_result *res = parsed_result; + + memset(&fdir_masks, 0, sizeof(struct rte_fdir_masks)); + + fdir_masks.only_ip_flow = res->only_ip_flow_value; + fdir_masks.vlan_id = res->vlan_id_value; + fdir_masks.vlan_prio = res->vlan_prio_value; + fdir_masks.dst_ipv4_mask = res->ip_dst_mask; + fdir_masks.src_ipv4_mask = res->ip_src_mask; + fdir_masks.src_port_mask = res->port_src_mask; + fdir_masks.dst_port_mask = res->port_dst_mask; + fdir_masks.flexbytes = res->flexbytes_value; + + fdir_set_masks(res->port_id, &fdir_masks); +} + +cmdline_parse_token_string_t cmd_pkt_filter_masks_filter_mask = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result, + filter_mask, "set_masks_filter"); +cmdline_parse_token_num_t cmd_pkt_filter_masks_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result, + port_id, UINT8); +cmdline_parse_token_string_t cmd_pkt_filter_masks_only_ip_flow = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result, + only_ip_flow, "only_ip_flow"); +cmdline_parse_token_num_t cmd_pkt_filter_masks_only_ip_flow_value = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result, + only_ip_flow_value, UINT8); +cmdline_parse_token_string_t cmd_pkt_filter_masks_src_mask = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result, + src_mask, "src_mask"); +cmdline_parse_token_num_t cmd_pkt_filter_masks_ip_src_mask = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result, + ip_src_mask, UINT32); 
+cmdline_parse_token_num_t cmd_pkt_filter_masks_port_src_mask = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result, + port_src_mask, UINT16); +cmdline_parse_token_string_t cmd_pkt_filter_masks_dst_mask = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result, + src_mask, "dst_mask"); +cmdline_parse_token_num_t cmd_pkt_filter_masks_ip_dst_mask = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result, + ip_dst_mask, UINT32); +cmdline_parse_token_num_t cmd_pkt_filter_masks_port_dst_mask = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result, + port_dst_mask, UINT16); +cmdline_parse_token_string_t cmd_pkt_filter_masks_flexbytes = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result, + flexbytes, "flexbytes"); +cmdline_parse_token_num_t cmd_pkt_filter_masks_flexbytes_value = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result, + flexbytes_value, UINT8); +cmdline_parse_token_string_t cmd_pkt_filter_masks_vlan_id = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result, + vlan_id, "vlan_id"); +cmdline_parse_token_num_t cmd_pkt_filter_masks_vlan_id_value = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result, + vlan_id_value, UINT8); +cmdline_parse_token_string_t cmd_pkt_filter_masks_vlan_prio = + TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result, + vlan_prio, "vlan_prio"); +cmdline_parse_token_num_t cmd_pkt_filter_masks_vlan_prio_value = + TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result, + vlan_prio_value, UINT8); + +cmdline_parse_inst_t cmd_set_masks_filter = { + .f = cmd_pkt_filter_masks_parsed, + .data = NULL, + .help_str = "setup masks filter", + .tokens = { + (void *)&cmd_pkt_filter_masks_filter_mask, + (void *)&cmd_pkt_filter_masks_port_id, + (void *)&cmd_pkt_filter_masks_only_ip_flow, + (void *)&cmd_pkt_filter_masks_only_ip_flow_value, + (void *)&cmd_pkt_filter_masks_src_mask, + (void *)&cmd_pkt_filter_masks_ip_src_mask, + (void *)&cmd_pkt_filter_masks_port_src_mask, + (void *)&cmd_pkt_filter_masks_dst_mask, + (void *)&cmd_pkt_filter_masks_ip_dst_mask, + (void *)&cmd_pkt_filter_masks_port_dst_mask, + (void *)&cmd_pkt_filter_masks_flexbytes, + (void *)&cmd_pkt_filter_masks_flexbytes_value, + (void *)&cmd_pkt_filter_masks_vlan_id, + (void *)&cmd_pkt_filter_masks_vlan_id_value, + (void *)&cmd_pkt_filter_masks_vlan_prio, + (void *)&cmd_pkt_filter_masks_vlan_prio_value, + NULL, + }, +}; + +/* *** SETUP ETHERNET LINK FLOW CONTROL *** */ +struct cmd_link_flow_ctrl_set_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t flow_ctrl; + cmdline_fixed_string_t rx; + cmdline_fixed_string_t rx_lfc_mode; + cmdline_fixed_string_t tx; + cmdline_fixed_string_t tx_lfc_mode; + uint32_t high_water; + uint32_t low_water; + uint16_t pause_time; + uint16_t send_xon; + uint8_t port_id; +}; + +static void +cmd_link_flow_ctrl_set_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_link_flow_ctrl_set_result *res = parsed_result; + struct rte_eth_fc_conf fc_conf; + int rx_fc_enable, tx_fc_enable; + int ret; + + /* + * Rx on/off, flow control is enabled/disabled on RX side. This can indicate + * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side. + * Tx on/off, flow control is enabled/disabled on TX side. This can indicate + * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side. 
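+ * The table below is indexed as [rx_fc_enable][tx_fc_enable], so the
+ * resulting mode is: rx=off,tx=off -> RTE_FC_NONE; rx=off,tx=on ->
+ * RTE_FC_RX_PAUSE; rx=on,tx=off -> RTE_FC_TX_PAUSE; rx=on,tx=on -> RTE_FC_FULL.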
+ */ + static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = { + {RTE_FC_NONE, RTE_FC_RX_PAUSE}, {RTE_FC_TX_PAUSE, RTE_FC_FULL} + }; + + rx_fc_enable = (!strcmp(res->rx_lfc_mode, "on")) ? 1 : 0; + tx_fc_enable = (!strcmp(res->tx_lfc_mode, "on")) ? 1 : 0; + + fc_conf.mode = rx_tx_onoff_2_lfc_mode[rx_fc_enable][tx_fc_enable]; + fc_conf.high_water = res->high_water; + fc_conf.low_water = res->low_water; + fc_conf.pause_time = res->pause_time; + fc_conf.send_xon = res->send_xon; + + ret = rte_eth_dev_flow_ctrl_set(res->port_id, &fc_conf); + if (ret != 0) + printf("bad flow control parameter, return code = %d\n", ret); +} + +cmdline_parse_token_string_t cmd_lfc_set_set = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + set, "set"); +cmdline_parse_token_string_t cmd_lfc_set_flow_ctrl = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + flow_ctrl, "flow_ctrl"); +cmdline_parse_token_string_t cmd_lfc_set_rx = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + rx, "rx"); +cmdline_parse_token_string_t cmd_lfc_set_rx_mode = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + rx_lfc_mode, "on#off"); +cmdline_parse_token_string_t cmd_lfc_set_tx = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + tx, "tx"); +cmdline_parse_token_string_t cmd_lfc_set_tx_mode = + TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + tx_lfc_mode, "on#off"); +cmdline_parse_token_num_t cmd_lfc_set_high_water = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + high_water, UINT32); +cmdline_parse_token_num_t cmd_lfc_set_low_water = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + low_water, UINT32); +cmdline_parse_token_num_t cmd_lfc_set_pause_time = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + pause_time, UINT16); +cmdline_parse_token_num_t cmd_lfc_set_send_xon = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + send_xon, UINT16); +cmdline_parse_token_num_t cmd_lfc_set_portid = + TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result, + port_id, UINT8); + +cmdline_parse_inst_t cmd_link_flow_control_set = { + .f = cmd_link_flow_ctrl_set_parsed, + .data = NULL, + .help_str = "Configure the Ethernet link flow control...", + .tokens = { + (void *)&cmd_lfc_set_set, + (void *)&cmd_lfc_set_flow_ctrl, + (void *)&cmd_lfc_set_rx, + (void *)&cmd_lfc_set_rx_mode, + (void *)&cmd_lfc_set_tx, + (void *)&cmd_lfc_set_tx_mode, + (void *)&cmd_lfc_set_high_water, + (void *)&cmd_lfc_set_low_water, + (void *)&cmd_lfc_set_pause_time, + (void *)&cmd_lfc_set_send_xon, + (void *)&cmd_lfc_set_portid, + NULL, + }, +}; + +/* *** RESET CONFIGURATION *** */ +struct cmd_reset_result { + cmdline_fixed_string_t reset; + cmdline_fixed_string_t def; +}; + +static void cmd_reset_parsed(__attribute__((unused)) void *parsed_result, + struct cmdline *cl, + __attribute__((unused)) void *data) +{ + cmdline_printf(cl, "Reset to default forwarding configuration...\n"); + set_def_fwd_config(); +} + +cmdline_parse_token_string_t cmd_reset_set = + TOKEN_STRING_INITIALIZER(struct cmd_reset_result, reset, "set"); +cmdline_parse_token_string_t cmd_reset_def = + TOKEN_STRING_INITIALIZER(struct cmd_reset_result, def, + "default"); + +cmdline_parse_inst_t cmd_reset = { + .f = cmd_reset_parsed, + .data = NULL, + .help_str = "set default: reset default forwarding configuration", + .tokens = { + (void *)&cmd_reset_set, + (void *)&cmd_reset_def, + NULL, + }, +}; + +/* *** START FORWARDING *** */ +struct cmd_start_result { 
+ cmdline_fixed_string_t start; +}; + +cmdline_parse_token_string_t cmd_start_start = + TOKEN_STRING_INITIALIZER(struct cmd_start_result, start, "start"); + +static void cmd_start_parsed(__attribute__((unused)) void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + start_packet_forwarding(0); +} + +cmdline_parse_inst_t cmd_start = { + .f = cmd_start_parsed, + .data = NULL, + .help_str = "start packet forwarding", + .tokens = { + (void *)&cmd_start_start, + NULL, + }, +}; + +/* *** START FORWARDING WITH ONE TX BURST FIRST *** */ +struct cmd_start_tx_first_result { + cmdline_fixed_string_t start; + cmdline_fixed_string_t tx_first; +}; + +static void +cmd_start_tx_first_parsed(__attribute__((unused)) void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + start_packet_forwarding(1); +} + +cmdline_parse_token_string_t cmd_start_tx_first_start = + TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_result, start, + "start"); +cmdline_parse_token_string_t cmd_start_tx_first_tx_first = + TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_result, + tx_first, "tx_first"); + +cmdline_parse_inst_t cmd_start_tx_first = { + .f = cmd_start_tx_first_parsed, + .data = NULL, + .help_str = "start packet forwarding, after sending 1 burst of packets", + .tokens = { + (void *)&cmd_start_tx_first_start, + (void *)&cmd_start_tx_first_tx_first, + NULL, + }, +}; + +/* *** SHOW CFG *** */ +struct cmd_showcfg_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t cfg; + cmdline_fixed_string_t what; +}; + +static void cmd_showcfg_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_showcfg_result *res = parsed_result; + if (!strcmp(res->what, "rxtx")) + rxtx_config_display(); + else if (!strcmp(res->what, "cores")) + fwd_lcores_config_display(); + else if (!strcmp(res->what, "fwd")) + fwd_config_display(); +} + +cmdline_parse_token_string_t cmd_showcfg_show = + TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, show, "show"); +cmdline_parse_token_string_t cmd_showcfg_port = + TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, cfg, "config"); +cmdline_parse_token_string_t cmd_showcfg_what = + TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, what, + "rxtx#cores#fwd"); + +cmdline_parse_inst_t cmd_showcfg = { + .f = cmd_showcfg_parsed, + .data = NULL, + .help_str = "show config rxtx|cores|fwd", + .tokens = { + (void *)&cmd_showcfg_show, + (void *)&cmd_showcfg_port, + (void *)&cmd_showcfg_what, + NULL, + }, +}; + +/* *** SHOW ALL PORT INFO *** */ +struct cmd_showportall_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t what; + cmdline_fixed_string_t all; +}; + +static void cmd_showportall_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + portid_t i; + + struct cmd_showportall_result *res = parsed_result; + if (!strcmp(res->show, "clear")) { + if (!strcmp(res->what, "stats")) + for (i = 0; i < nb_ports; i++) + nic_stats_clear(i); + } else if (!strcmp(res->what, "info")) + for (i = 0; i < nb_ports; i++) + port_infos_display(i); + else if (!strcmp(res->what, "stats")) + for (i = 0; i < nb_ports; i++) + nic_stats_display(i); + else if (!strcmp(res->what, "fdir")) + for (i = 0; i < nb_ports; i++) + fdir_get_infos(i); +} + +cmdline_parse_token_string_t cmd_showportall_show = + TOKEN_STRING_INITIALIZER(struct 
cmd_showportall_result, show, + "show#clear"); +cmdline_parse_token_string_t cmd_showportall_port = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, port, "port"); +cmdline_parse_token_string_t cmd_showportall_what = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, what, + "info#stats#fdir"); +cmdline_parse_token_string_t cmd_showportall_all = + TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, all, "all"); +cmdline_parse_inst_t cmd_showportall = { + .f = cmd_showportall_parsed, + .data = NULL, + .help_str = "show|clear port info|stats|fdir all", + .tokens = { + (void *)&cmd_showportall_show, + (void *)&cmd_showportall_port, + (void *)&cmd_showportall_what, + (void *)&cmd_showportall_all, + NULL, + }, +}; + +/* *** SHOW PORT INFO *** */ +struct cmd_showport_result { + cmdline_fixed_string_t show; + cmdline_fixed_string_t port; + cmdline_fixed_string_t what; + uint8_t portnum; +}; + +static void cmd_showport_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_showport_result *res = parsed_result; + if (!strcmp(res->show, "clear")) { + if (!strcmp(res->what, "stats")) + nic_stats_clear(res->portnum); + } else if (!strcmp(res->what, "info")) + port_infos_display(res->portnum); + else if (!strcmp(res->what, "stats")) + nic_stats_display(res->portnum); + else if (!strcmp(res->what, "fdir")) + fdir_get_infos(res->portnum); +} + +cmdline_parse_token_string_t cmd_showport_show = + TOKEN_STRING_INITIALIZER(struct cmd_showport_result, show, + "show#clear"); +cmdline_parse_token_string_t cmd_showport_port = + TOKEN_STRING_INITIALIZER(struct cmd_showport_result, port, "port"); +cmdline_parse_token_string_t cmd_showport_what = + TOKEN_STRING_INITIALIZER(struct cmd_showport_result, what, + "info#stats#fdir"); +cmdline_parse_token_num_t cmd_showport_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_showport_result, portnum, INT32); + +cmdline_parse_inst_t cmd_showport = { + .f = cmd_showport_parsed, + .data = NULL, + .help_str = "show|clear port info|stats|fdir X (X = port number)", + .tokens = { + (void *)&cmd_showport_show, + (void *)&cmd_showport_port, + (void *)&cmd_showport_what, + (void *)&cmd_showport_portnum, + NULL, + }, +}; + +/* *** READ PORT REGISTER *** */ +struct cmd_read_reg_result { + cmdline_fixed_string_t read; + cmdline_fixed_string_t reg; + uint8_t port_id; + uint32_t reg_off; +}; + +static void +cmd_read_reg_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_read_reg_result *res = parsed_result; + port_reg_display(res->port_id, res->reg_off); +} + +cmdline_parse_token_string_t cmd_read_reg_read = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_result, read, "read"); +cmdline_parse_token_string_t cmd_read_reg_reg = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_result, reg, "reg"); +cmdline_parse_token_num_t cmd_read_reg_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, port_id, UINT8); +cmdline_parse_token_num_t cmd_read_reg_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, reg_off, UINT32); + +cmdline_parse_inst_t cmd_read_reg = { + .f = cmd_read_reg_parsed, + .data = NULL, + .help_str = "read reg port_id reg_off", + .tokens = { + (void *)&cmd_read_reg_read, + (void *)&cmd_read_reg_reg, + (void *)&cmd_read_reg_port_id, + (void *)&cmd_read_reg_reg_off, + NULL, + }, +}; + +/* *** READ PORT REGISTER BIT FIELD *** */ +struct cmd_read_reg_bit_field_result { + cmdline_fixed_string_t read; + 
cmdline_fixed_string_t regfield; + uint8_t port_id; + uint32_t reg_off; + uint8_t bit1_pos; + uint8_t bit2_pos; +}; + +static void +cmd_read_reg_bit_field_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_read_reg_bit_field_result *res = parsed_result; + port_reg_bit_field_display(res->port_id, res->reg_off, + res->bit1_pos, res->bit2_pos); +} + +cmdline_parse_token_string_t cmd_read_reg_bit_field_read = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_field_result, read, + "read"); +cmdline_parse_token_string_t cmd_read_reg_bit_field_regfield = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_field_result, + regfield, "regfield"); +cmdline_parse_token_num_t cmd_read_reg_bit_field_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, port_id, + UINT8); +cmdline_parse_token_num_t cmd_read_reg_bit_field_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, reg_off, + UINT32); +cmdline_parse_token_num_t cmd_read_reg_bit_field_bit1_pos = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, bit1_pos, + UINT8); +cmdline_parse_token_num_t cmd_read_reg_bit_field_bit2_pos = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, bit2_pos, + UINT8); + +cmdline_parse_inst_t cmd_read_reg_bit_field = { + .f = cmd_read_reg_bit_field_parsed, + .data = NULL, + .help_str = "read regfield port_id reg_off bit_x bit_y " + "(read register bit field between bit_x and bit_y included)", + .tokens = { + (void *)&cmd_read_reg_bit_field_read, + (void *)&cmd_read_reg_bit_field_regfield, + (void *)&cmd_read_reg_bit_field_port_id, + (void *)&cmd_read_reg_bit_field_reg_off, + (void *)&cmd_read_reg_bit_field_bit1_pos, + (void *)&cmd_read_reg_bit_field_bit2_pos, + NULL, + }, +}; + +/* *** READ PORT REGISTER BIT *** */ +struct cmd_read_reg_bit_result { + cmdline_fixed_string_t read; + cmdline_fixed_string_t regbit; + uint8_t port_id; + uint32_t reg_off; + uint8_t bit_pos; +}; + +static void +cmd_read_reg_bit_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_read_reg_bit_result *res = parsed_result; + port_reg_bit_display(res->port_id, res->reg_off, res->bit_pos); +} + +cmdline_parse_token_string_t cmd_read_reg_bit_read = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_result, read, "read"); +cmdline_parse_token_string_t cmd_read_reg_bit_regbit = + TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_result, + regbit, "regbit"); +cmdline_parse_token_num_t cmd_read_reg_bit_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, port_id, UINT8); +cmdline_parse_token_num_t cmd_read_reg_bit_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, reg_off, UINT32); +cmdline_parse_token_num_t cmd_read_reg_bit_bit_pos = + TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, bit_pos, UINT8); + +cmdline_parse_inst_t cmd_read_reg_bit = { + .f = cmd_read_reg_bit_parsed, + .data = NULL, + .help_str = "read regbit port_id reg_off bit_x (0 <= bit_x <= 31)", + .tokens = { + (void *)&cmd_read_reg_bit_read, + (void *)&cmd_read_reg_bit_regbit, + (void *)&cmd_read_reg_bit_port_id, + (void *)&cmd_read_reg_bit_reg_off, + (void *)&cmd_read_reg_bit_bit_pos, + NULL, + }, +}; + +/* *** WRITE PORT REGISTER *** */ +struct cmd_write_reg_result { + cmdline_fixed_string_t write; + cmdline_fixed_string_t reg; + uint8_t port_id; + uint32_t reg_off; + uint32_t value; +}; + +static void +cmd_write_reg_parsed(void *parsed_result, + 
__attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_write_reg_result *res = parsed_result; + port_reg_set(res->port_id, res->reg_off, res->value); +} + +cmdline_parse_token_string_t cmd_write_reg_write = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_result, write, "write"); +cmdline_parse_token_string_t cmd_write_reg_reg = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_result, reg, "reg"); +cmdline_parse_token_num_t cmd_write_reg_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, port_id, UINT8); +cmdline_parse_token_num_t cmd_write_reg_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, reg_off, UINT32); +cmdline_parse_token_num_t cmd_write_reg_value = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, value, UINT32); + +cmdline_parse_inst_t cmd_write_reg = { + .f = cmd_write_reg_parsed, + .data = NULL, + .help_str = "write reg port_id reg_off reg_value", + .tokens = { + (void *)&cmd_write_reg_write, + (void *)&cmd_write_reg_reg, + (void *)&cmd_write_reg_port_id, + (void *)&cmd_write_reg_reg_off, + (void *)&cmd_write_reg_value, + NULL, + }, +}; + +/* *** WRITE PORT REGISTER BIT FIELD *** */ +struct cmd_write_reg_bit_field_result { + cmdline_fixed_string_t write; + cmdline_fixed_string_t regfield; + uint8_t port_id; + uint32_t reg_off; + uint8_t bit1_pos; + uint8_t bit2_pos; + uint32_t value; +}; + +static void +cmd_write_reg_bit_field_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_write_reg_bit_field_result *res = parsed_result; + port_reg_bit_field_set(res->port_id, res->reg_off, + res->bit1_pos, res->bit2_pos, res->value); +} + +cmdline_parse_token_string_t cmd_write_reg_bit_field_write = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_field_result, write, + "write"); +cmdline_parse_token_string_t cmd_write_reg_bit_field_regfield = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_field_result, + regfield, "regfield"); +cmdline_parse_token_num_t cmd_write_reg_bit_field_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, port_id, + UINT8); +cmdline_parse_token_num_t cmd_write_reg_bit_field_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, reg_off, + UINT32); +cmdline_parse_token_num_t cmd_write_reg_bit_field_bit1_pos = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, bit1_pos, + UINT8); +cmdline_parse_token_num_t cmd_write_reg_bit_field_bit2_pos = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, bit2_pos, + UINT8); +cmdline_parse_token_num_t cmd_write_reg_bit_field_value = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, value, + UINT32); + +cmdline_parse_inst_t cmd_write_reg_bit_field = { + .f = cmd_write_reg_bit_field_parsed, + .data = NULL, + .help_str = "write regfield port_id reg_off bit_x bit_y reg_value" + "(set register bit field between bit_x and bit_y included)", + .tokens = { + (void *)&cmd_write_reg_bit_field_write, + (void *)&cmd_write_reg_bit_field_regfield, + (void *)&cmd_write_reg_bit_field_port_id, + (void *)&cmd_write_reg_bit_field_reg_off, + (void *)&cmd_write_reg_bit_field_bit1_pos, + (void *)&cmd_write_reg_bit_field_bit2_pos, + (void *)&cmd_write_reg_bit_field_value, + NULL, + }, +}; + +/* *** WRITE PORT REGISTER BIT *** */ +struct cmd_write_reg_bit_result { + cmdline_fixed_string_t write; + cmdline_fixed_string_t regbit; + uint8_t port_id; + uint32_t reg_off; + uint8_t bit_pos; + uint8_t value; +}; + +static void 
+cmd_write_reg_bit_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_write_reg_bit_result *res = parsed_result; + port_reg_bit_set(res->port_id, res->reg_off, res->bit_pos, res->value); +} + +cmdline_parse_token_string_t cmd_write_reg_bit_write = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_result, write, + "write"); +cmdline_parse_token_string_t cmd_write_reg_bit_regbit = + TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_result, + regbit, "regbit"); +cmdline_parse_token_num_t cmd_write_reg_bit_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, port_id, UINT8); +cmdline_parse_token_num_t cmd_write_reg_bit_reg_off = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, reg_off, UINT32); +cmdline_parse_token_num_t cmd_write_reg_bit_bit_pos = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, bit_pos, UINT8); +cmdline_parse_token_num_t cmd_write_reg_bit_value = + TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, value, UINT8); + +cmdline_parse_inst_t cmd_write_reg_bit = { + .f = cmd_write_reg_bit_parsed, + .data = NULL, + .help_str = "write regbit port_id reg_off bit_x 0/1 (0 <= bit_x <= 31)", + .tokens = { + (void *)&cmd_write_reg_bit_write, + (void *)&cmd_write_reg_bit_regbit, + (void *)&cmd_write_reg_bit_port_id, + (void *)&cmd_write_reg_bit_reg_off, + (void *)&cmd_write_reg_bit_bit_pos, + (void *)&cmd_write_reg_bit_value, + NULL, + }, +}; + +/* *** READ A RING DESCRIPTOR OF A PORT RX/TX QUEUE *** */ +struct cmd_read_rxd_txd_result { + cmdline_fixed_string_t read; + cmdline_fixed_string_t rxd_txd; + uint8_t port_id; + uint16_t queue_id; + uint16_t desc_id; +}; + +static void +cmd_read_rxd_txd_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_read_rxd_txd_result *res = parsed_result; + + if (!strcmp(res->rxd_txd, "rxd")) + rx_ring_desc_display(res->port_id, res->queue_id, res->desc_id); + else if (!strcmp(res->rxd_txd, "txd")) + tx_ring_desc_display(res->port_id, res->queue_id, res->desc_id); +} + +cmdline_parse_token_string_t cmd_read_rxd_txd_read = + TOKEN_STRING_INITIALIZER(struct cmd_read_rxd_txd_result, read, "read"); +cmdline_parse_token_string_t cmd_read_rxd_txd_rxd_txd = + TOKEN_STRING_INITIALIZER(struct cmd_read_rxd_txd_result, rxd_txd, + "rxd#txd"); +cmdline_parse_token_num_t cmd_read_rxd_txd_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, port_id, UINT8); +cmdline_parse_token_num_t cmd_read_rxd_txd_queue_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, queue_id, UINT16); +cmdline_parse_token_num_t cmd_read_rxd_txd_desc_id = + TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, desc_id, UINT16); + +cmdline_parse_inst_t cmd_read_rxd_txd = { + .f = cmd_read_rxd_txd_parsed, + .data = NULL, + .help_str = "read rxd|txd port_id queue_id rxd_id", + .tokens = { + (void *)&cmd_read_rxd_txd_read, + (void *)&cmd_read_rxd_txd_rxd_txd, + (void *)&cmd_read_rxd_txd_port_id, + (void *)&cmd_read_rxd_txd_queue_id, + (void *)&cmd_read_rxd_txd_desc_id, + NULL, + }, +}; + +/* *** QUIT *** */ +struct cmd_quit_result { + cmdline_fixed_string_t quit; +}; + +static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result, + struct cmdline *cl, + __attribute__((unused)) void *data) +{ + pmd_test_exit(); + cmdline_quit(cl); +} + +cmdline_parse_token_string_t cmd_quit_quit = + TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit"); + +cmdline_parse_inst_t cmd_quit = { + 
.f = cmd_quit_parsed, + .data = NULL, + .help_str = "exit application", + .tokens = { + (void *)&cmd_quit_quit, + NULL, + }, +}; + +/* *** ADD/REMOVE MAC ADDRESS FROM A PORT *** */ +struct cmd_mac_addr_result { + cmdline_fixed_string_t mac_addr_cmd; + cmdline_fixed_string_t what; + uint8_t port_num; + struct ether_addr address; +}; + +static void cmd_mac_addr_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_mac_addr_result *res = parsed_result; + int ret; + + if (strcmp(res->what, "add") == 0) + ret = rte_eth_dev_mac_addr_add(res->port_num, &res->address, 0); + else + ret = rte_eth_dev_mac_addr_remove(res->port_num, &res->address); + + /* check the return value and print it if is < 0 */ + if(ret < 0) + printf("mac_addr_cmd error: (%s)\n", strerror(-ret)); + +} + +cmdline_parse_token_string_t cmd_mac_addr_cmd = + TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, mac_addr_cmd, + "mac_addr"); +cmdline_parse_token_string_t cmd_mac_addr_what = + TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, what, + "add#remove"); +cmdline_parse_token_num_t cmd_mac_addr_portnum = + TOKEN_NUM_INITIALIZER(struct cmd_mac_addr_result, port_num, UINT8); +cmdline_parse_token_string_t cmd_mac_addr_addr = + TOKEN_ETHERADDR_INITIALIZER(struct cmd_mac_addr_result, address); + +cmdline_parse_inst_t cmd_mac_addr = { + .f = cmd_mac_addr_parsed, + .data = (void *)0, + .help_str = "mac_addr add|remove X
: " + "add/remove MAC address on port X", + .tokens = { + (void *)&cmd_mac_addr_cmd, + (void *)&cmd_mac_addr_what, + (void *)&cmd_mac_addr_portnum, + (void *)&cmd_mac_addr_addr, + NULL, + }, +}; + + +/* list of instructions */ +cmdline_parse_ctx_t main_ctx[] = { + (cmdline_parse_inst_t *)&cmd_help, + (cmdline_parse_inst_t *)&cmd_quit, + (cmdline_parse_inst_t *)&cmd_showport, + (cmdline_parse_inst_t *)&cmd_showportall, + (cmdline_parse_inst_t *)&cmd_showcfg, + (cmdline_parse_inst_t *)&cmd_start, + (cmdline_parse_inst_t *)&cmd_start_tx_first, + (cmdline_parse_inst_t *)&cmd_reset, + (cmdline_parse_inst_t *)&cmd_set_numbers, + (cmdline_parse_inst_t *)&cmd_set_txpkts, + (cmdline_parse_inst_t *)&cmd_set_fwd_list, + (cmdline_parse_inst_t *)&cmd_set_fwd_mask, + (cmdline_parse_inst_t *)&cmd_set_fwd_mode, + (cmdline_parse_inst_t *)&cmd_set_promisc_mode_one, + (cmdline_parse_inst_t *)&cmd_set_promisc_mode_all, + (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_one, + (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_all, + (cmdline_parse_inst_t *)&cmd_rx_vlan_filter_all, + (cmdline_parse_inst_t *)&cmd_rx_vlan_filter, + (cmdline_parse_inst_t *)&cmd_tx_vlan_set, + (cmdline_parse_inst_t *)&cmd_tx_vlan_reset, + (cmdline_parse_inst_t *)&cmd_tx_cksum_set, + (cmdline_parse_inst_t *)&cmd_link_flow_control_set, + (cmdline_parse_inst_t *)&cmd_read_reg, + (cmdline_parse_inst_t *)&cmd_read_reg_bit_field, + (cmdline_parse_inst_t *)&cmd_read_reg_bit, + (cmdline_parse_inst_t *)&cmd_write_reg, + (cmdline_parse_inst_t *)&cmd_write_reg_bit_field, + (cmdline_parse_inst_t *)&cmd_write_reg_bit, + (cmdline_parse_inst_t *)&cmd_read_rxd_txd, + (cmdline_parse_inst_t *)&cmd_add_signature_filter, + (cmdline_parse_inst_t *)&cmd_upd_signature_filter, + (cmdline_parse_inst_t *)&cmd_rm_signature_filter, + (cmdline_parse_inst_t *)&cmd_add_perfect_filter, + (cmdline_parse_inst_t *)&cmd_upd_perfect_filter, + (cmdline_parse_inst_t *)&cmd_rm_perfect_filter, + (cmdline_parse_inst_t *)&cmd_set_masks_filter, + (cmdline_parse_inst_t *)&cmd_stop, + (cmdline_parse_inst_t *)&cmd_mac_addr, + NULL, +}; + +/* prompt function, called from main on MASTER lcore */ +void +prompt(void) +{ + struct cmdline *cl; + + cl = cmdline_stdin_new(main_ctx, "testpmd> "); + if (cl == NULL) { + return; + } + cmdline_interact(cl); + cmdline_stdin_exit(cl); +} diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c new file mode 100644 index 0000000000..fd62235e91 --- /dev/null +++ b/app/test-pmd/config.c @@ -0,0 +1,1142 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +static void +print_ethaddr(const char *name, struct ether_addr *eth_addr) +{ + printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name, + eth_addr->addr_bytes[0], + eth_addr->addr_bytes[1], + eth_addr->addr_bytes[2], + eth_addr->addr_bytes[3], + eth_addr->addr_bytes[4], + eth_addr->addr_bytes[5]); +} + +void +nic_stats_display(portid_t port_id) +{ + struct rte_eth_stats stats; + + static const char *nic_stats_border = "########################"; + + if (port_id >= nb_ports) { + printf("Invalid port, range is [0, %d]\n", nb_ports - 1); + return; + } + rte_eth_stats_get(port_id, &stats); + printf("\n %s NIC statistics for port %-2d %s\n", + nic_stats_border, port_id, nic_stats_border); + printf(" RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64"RX-bytes: " + "%-"PRIu64"\n" + " TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64"TX-bytes: " + "%-"PRIu64"\n", + stats.ipackets, stats.ierrors, stats.ibytes, + stats.opackets, stats.oerrors, stats.obytes); + + /* stats fdir */ + if (fdir_conf.mode != RTE_FDIR_MODE_NONE) + printf(" Fdirmiss: %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n", + stats.fdirmiss, + stats.fdirmatch); + + printf(" %s############################%s\n", + nic_stats_border, nic_stats_border); +} + +void +nic_stats_clear(portid_t port_id) +{ + if (port_id >= nb_ports) { + printf("Invalid port, range is [0, %d]\n", nb_ports - 1); + return; + } + rte_eth_stats_reset(port_id); + printf("\n NIC statistics for port %d cleared\n", port_id); +} + +void +port_infos_display(portid_t port_id) +{ + struct rte_port *port; + struct rte_eth_link link; + static const char *info_border = "*********************"; + + if (port_id >= nb_ports) { + printf("Invalid port, range is [0, %d]\n", nb_ports - 1); + return; + } + port = &ports[port_id]; + rte_eth_link_get(port_id, &link); + printf("\n%s Infos for port %-2d %s\n", + info_border, port_id, info_border); + print_ethaddr("MAC address: ", &port->eth_addr); + printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down")); + printf("Link speed: %u Mbps\n", (unsigned) link.link_speed); + printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex")); + printf("Promiscuous mode: %s\n", + rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled"); + printf("Allmulticast mode: %s\n", + rte_eth_allmulticast_get(port_id) ? 
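print_ethaddr() above writes a MAC address as six colon-separated hex bytes. A standalone sketch of the same formatting, using a plain byte array and an arbitrary example address instead of struct ether_addr:

    #include <stdio.h>
    #include <stdint.h>

    /* Same "%02X:...:%02X" formatting as print_ethaddr(), applied to a
     * plain 6-byte array instead of struct ether_addr. */
    static void
    print_mac(const char *name, const uint8_t mac[6])
    {
        printf("%s%02X:%02X:%02X:%02X:%02X:%02X\n", name,
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x1B, 0x21, 0xAB, 0xCD, 0xEF };

        print_mac("MAC address: ", mac);  /* MAC address: 00:1B:21:AB:CD:EF */
        return 0;
    }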
"enabled" : "disabled"); + printf("Maximum number of MAC addresses: %u\n", + (unsigned int)(port->dev_info.max_mac_addrs)); +} + +static int +port_id_is_invalid(portid_t port_id) +{ + if (port_id < nb_ports) + return 0; + printf("Invalid port %d (must be < nb_ports=%d)\n", port_id, nb_ports); + return 1; +} + +static int +vlan_id_is_invalid(uint16_t vlan_id) +{ + if (vlan_id < 4096) + return 0; + printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id); + return 1; +} + +static int +port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off) +{ + uint64_t pci_len; + + if (reg_off & 0x3) { + printf("Port register offset 0x%X not aligned on a 4-byte " + "boundary\n", + (unsigned)reg_off); + return 1; + } + pci_len = ports[port_id].dev_info.pci_dev->mem_resource.len; + if (reg_off >= pci_len) { + printf("Port %d: register offset %u (0x%X) out of port PCI " + "resource (length=%"PRIu64")\n", + port_id, (unsigned)reg_off, (unsigned)reg_off, pci_len); + return 1; + } + return 0; +} + +static int +reg_bit_pos_is_invalid(uint8_t bit_pos) +{ + if (bit_pos <= 31) + return 0; + printf("Invalid bit position %d (must be <= 31)\n", bit_pos); + return 1; +} + +#define display_port_and_reg_off(port_id, reg_off) \ + printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off)) + +static inline void +display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v) +{ + display_port_and_reg_off(port_id, (unsigned)reg_off); + printf("0x%08X (%u)\n", (unsigned)reg_v, (unsigned)reg_v); +} + +void +port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x) +{ + uint32_t reg_v; + + + if (port_id_is_invalid(port_id)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + if (reg_bit_pos_is_invalid(bit_x)) + return; + reg_v = port_id_pci_reg_read(port_id, reg_off); + display_port_and_reg_off(port_id, (unsigned)reg_off); + printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x)); +} + +void +port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, + uint8_t bit1_pos, uint8_t bit2_pos) +{ + uint32_t reg_v; + uint8_t l_bit; + uint8_t h_bit; + + if (port_id_is_invalid(port_id)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + if (reg_bit_pos_is_invalid(bit1_pos)) + return; + if (reg_bit_pos_is_invalid(bit2_pos)) + return; + if (bit1_pos > bit2_pos) + l_bit = bit2_pos, h_bit = bit1_pos; + else + l_bit = bit1_pos, h_bit = bit2_pos; + + reg_v = port_id_pci_reg_read(port_id, reg_off); + reg_v >>= l_bit; + if (h_bit < 31) + reg_v &= ((1 << (h_bit - l_bit + 1)) - 1); + display_port_and_reg_off(port_id, (unsigned)reg_off); + printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit, + ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v); +} + +void +port_reg_display(portid_t port_id, uint32_t reg_off) +{ + uint32_t reg_v; + + if (port_id_is_invalid(port_id)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + reg_v = port_id_pci_reg_read(port_id, reg_off); + display_port_reg_value(port_id, reg_off, reg_v); +} + +void +port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, + uint8_t bit_v) +{ + uint32_t reg_v; + + if (port_id_is_invalid(port_id)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + if (reg_bit_pos_is_invalid(bit_pos)) + return; + if (bit_v > 1) { + printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v); + return; + } + reg_v = port_id_pci_reg_read(port_id, reg_off); + if (bit_v == 0) + reg_v &= ~(1 << bit_pos); + else + reg_v |= (1 << bit_pos); + 
port_id_pci_reg_write(port_id, reg_off, reg_v); + display_port_reg_value(port_id, reg_off, reg_v); +} + +void +port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, + uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value) +{ + uint32_t max_v; + uint32_t reg_v; + uint8_t l_bit; + uint8_t h_bit; + + if (port_id_is_invalid(port_id)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + if (reg_bit_pos_is_invalid(bit1_pos)) + return; + if (reg_bit_pos_is_invalid(bit2_pos)) + return; + if (bit1_pos > bit2_pos) + l_bit = bit2_pos, h_bit = bit1_pos; + else + l_bit = bit1_pos, h_bit = bit2_pos; + + if ((h_bit - l_bit) < 31) + max_v = (1 << (h_bit - l_bit + 1)) - 1; + else + max_v = 0xFFFFFFFF; + + if (value > max_v) { + printf("Invalid value %u (0x%x) must be < %u (0x%x)\n", + (unsigned)value, (unsigned)value, + (unsigned)max_v, (unsigned)max_v); + return; + } + reg_v = port_id_pci_reg_read(port_id, reg_off); + reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */ + reg_v |= (value << l_bit); /* Set changed bits */ + port_id_pci_reg_write(port_id, reg_off, reg_v); + display_port_reg_value(port_id, reg_off, reg_v); +} + +void +port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v) +{ + if (port_id_is_invalid(port_id)) + return; + if (port_reg_off_is_invalid(port_id, reg_off)) + return; + port_id_pci_reg_write(port_id, reg_off, reg_v); + display_port_reg_value(port_id, reg_off, reg_v); +} + +/* + * RX/TX ring descriptors display functions. + */ +static int +rx_queue_id_is_invalid(queueid_t rxq_id) +{ + if (rxq_id < nb_rxq) + return 0; + printf("Invalid RX queue %d (must be < nb_rxq=%d)\n", rxq_id, nb_rxq); + return 1; +} + +static int +tx_queue_id_is_invalid(queueid_t txq_id) +{ + if (txq_id < nb_txq) + return 0; + printf("Invalid TX queue %d (must be < nb_rxq=%d)\n", txq_id, nb_txq); + return 1; +} + +static int +rx_desc_id_is_invalid(uint16_t rxdesc_id) +{ + if (rxdesc_id < nb_rxd) + return 0; + printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n", + rxdesc_id, nb_rxd); + return 1; +} + +static int +tx_desc_id_is_invalid(uint16_t txdesc_id) +{ + if (txdesc_id < nb_txd) + return 0; + printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n", + txdesc_id, nb_txd); + return 1; +} + +static const struct rte_memzone * +ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + + rte_snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d", + ports[port_id].dev_info.driver_name, ring_name, port_id, q_id); + mz = rte_memzone_lookup(mz_name); + if (mz == NULL) + printf("%s ring memory zoneof (port %d, queue %d) not" + "found (zone name = %s\n", + ring_name, port_id, q_id, mz_name); + return (mz); +} + +union igb_ring_dword { + uint64_t dword; + struct { + uint32_t hi; + uint32_t lo; + } words; +}; + +struct igb_ring_desc { + union igb_ring_dword lo_dword; + union igb_ring_dword hi_dword; +}; + +static void +ring_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id) +{ + struct igb_ring_desc *ring; + struct igb_ring_desc rd; + + ring = (struct igb_ring_desc *) ring_mz->addr; + rd.lo_dword = rte_le_to_cpu_64(ring[desc_id].lo_dword); + rd.hi_dword = rte_le_to_cpu_64(ring[desc_id].hi_dword); + printf(" 0x%08X - 0x%08X / 0x%08X - 0x%08X\n", + (unsigned)rd.lo_dword.words.lo, (unsigned)rd.lo_dword.words.hi, + (unsigned)rd.hi_dword.words.lo, (unsigned)rd.hi_dword.words.hi); +} + +void +rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id) +{ + const struct 
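ring_dma_zone_lookup() above locates a queue's descriptor ring purely by memzone name, composed as driver_ring_port_queue. A small sketch of that name layout, using plain snprintf() and an illustrative driver name (the real value comes from dev_info.driver_name):

    #include <stdio.h>

    int main(void)
    {
        char mz_name[64];

        /* Same "%s_%s_%d_%d" layout as ring_dma_zone_lookup();
         * "rte_igb_pmd" is only an example driver name. */
        snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
                 "rte_igb_pmd", "rx_ring", 0, 1);
        printf("%s\n", mz_name);  /* rte_igb_pmd_rx_ring_0_1 */
        return 0;
    }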
rte_memzone *rx_mz; + + if (port_id_is_invalid(port_id)) + return; + if (rx_queue_id_is_invalid(rxq_id)) + return; + if (rx_desc_id_is_invalid(rxd_id)) + return; + rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id); + if (rx_mz == NULL) + return; + ring_descriptor_display(rx_mz, rxd_id); +} + +void +tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id) +{ + const struct rte_memzone *tx_mz; + + if (port_id_is_invalid(port_id)) + return; + if (tx_queue_id_is_invalid(txq_id)) + return; + if (tx_desc_id_is_invalid(txd_id)) + return; + tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id); + if (tx_mz == NULL) + return; + ring_descriptor_display(tx_mz, txd_id); +} + +void +fwd_lcores_config_display(void) +{ + lcoreid_t lc_id; + + printf("List of forwarding lcores:"); + for (lc_id = 0; lc_id < nb_cfg_lcores; lc_id++) + printf(" %2u", fwd_lcores_cpuids[lc_id]); + printf("\n"); +} +void +rxtx_config_display(void) +{ + printf(" %s packet forwarding - CRC stripping %s - " + "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name, + rx_mode.hw_strip_crc ? "enabled" : "disabled", + nb_pkt_per_burst); + + if (cur_fwd_eng == &tx_only_engine) + printf(" packet len=%u - nb packet segments=%d\n", + (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs); + + printf(" nb forwarding cores=%d - nb forwarding ports=%d\n", + nb_fwd_lcores, nb_fwd_ports); + printf(" RX queues=%d - RX desc=%d - RX free threshold=%d\n", + nb_rxq, nb_rxd, rx_free_thresh); + printf(" RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", + rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh); + printf(" TX queues=%d - TX desc=%d - TX free threshold=%d\n", + nb_txq, nb_txd, tx_free_thresh); + printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", + tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh); + printf(" TX RS bit threshold=%d\n", tx_rs_thresh); +} + +/* + * Setup forwarding configuration for each logical core. + */ +static void +setup_fwd_config_of_each_lcore(struct fwd_config *cfg) +{ + streamid_t nb_fs_per_lcore; + streamid_t nb_fs; + streamid_t sm_id; + lcoreid_t nb_extra; + lcoreid_t nb_fc; + lcoreid_t nb_lc; + lcoreid_t lc_id; + + nb_fs = cfg->nb_fwd_streams; + nb_fc = cfg->nb_fwd_lcores; + if (nb_fs <= nb_fc) { + nb_fs_per_lcore = 1; + nb_extra = 0; + } else { + nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc); + nb_extra = (lcoreid_t) (nb_fs % nb_fc); + } + nb_extra = (lcoreid_t) (nb_fs % nb_fc); + + nb_lc = (lcoreid_t) (nb_fc - nb_extra); + sm_id = 0; + for (lc_id = 0; lc_id < nb_lc; lc_id++) { + fwd_lcores[lc_id]->stream_idx = sm_id; + fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore; + sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); + } + + /* + * Assign extra remaining streams, if any. + */ + nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1); + for (lc_id = 0; lc_id < nb_extra; lc_id++) { + fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id; + fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore; + sm_id = (streamid_t) (sm_id + nb_fs_per_lcore); + } +} + +static void +simple_fwd_config_setup(void) +{ + portid_t i; + portid_t j; + portid_t inc = 2; + + if (nb_fwd_ports % 2) { + if (port_topology == PORT_TOPOLOGY_CHAINED) { + inc = 1; + } + else { + printf("\nWarning! Cannot handle an odd number of ports " + "with the current port topology. 
Configuration " + "must be changed to have an even number of ports, " + "or relaunch application with " + "--port-topology=chained\n\n"); + } + } + + cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports; + cur_fwd_config.nb_fwd_streams = + (streamid_t) cur_fwd_config.nb_fwd_ports; + + /* + * In the simple forwarding test, the number of forwarding cores + * must be lower or equal to the number of forwarding ports. + */ + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; + if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports) + cur_fwd_config.nb_fwd_lcores = + (lcoreid_t) cur_fwd_config.nb_fwd_ports; + setup_fwd_config_of_each_lcore(&cur_fwd_config); + + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) { + j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports); + fwd_streams[i]->rx_port = fwd_ports_ids[i]; + fwd_streams[i]->rx_queue = 0; + fwd_streams[i]->tx_port = fwd_ports_ids[j]; + fwd_streams[i]->tx_queue = 0; + fwd_streams[i]->peer_addr = j; + + if (port_topology == PORT_TOPOLOGY_PAIRED) { + fwd_streams[j]->rx_port = fwd_ports_ids[j]; + fwd_streams[j]->rx_queue = 0; + fwd_streams[j]->tx_port = fwd_ports_ids[i]; + fwd_streams[j]->tx_queue = 0; + fwd_streams[j]->peer_addr = i; + } + } +} + +/** + * For the RSS forwarding test, each core is assigned on every port a transmit + * queue whose index is the index of the core itself. This approach limits the + * maximumm number of processing cores of the RSS test to the maximum number of + * TX queues supported by the devices. + * + * Each core is assigned a single stream, each stream being composed of + * a RX queue to poll on a RX port for input messages, associated with + * a TX queue of a TX port where to send forwarded packets. + * All packets received on the RX queue of index "RxQj" of the RX port "RxPi" + * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two + * following rules: + * - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd + * - TxQl = RxQj + */ +static void +rss_fwd_config_setup(void) +{ + portid_t rxp; + portid_t txp; + queueid_t rxq; + queueid_t nb_q; + lcoreid_t lc_id; + + nb_q = nb_rxq; + if (nb_q > nb_txq) + nb_q = nb_txq; + cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores; + cur_fwd_config.nb_fwd_ports = nb_fwd_ports; + cur_fwd_config.nb_fwd_streams = + (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports); + if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores) + cur_fwd_config.nb_fwd_streams = + (streamid_t)cur_fwd_config.nb_fwd_lcores; + else + cur_fwd_config.nb_fwd_lcores = + (lcoreid_t)cur_fwd_config.nb_fwd_streams; + setup_fwd_config_of_each_lcore(&cur_fwd_config); + rxp = 0; rxq = 0; + for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) { + struct fwd_stream *fs; + + fs = fwd_streams[lc_id]; + if ((rxp & 0x1) == 0) + txp = (portid_t) (rxp + 1); + else + txp = (portid_t) (rxp - 1); + fs->rx_port = fwd_ports_ids[rxp]; + fs->rx_queue = rxq; + fs->tx_port = fwd_ports_ids[txp]; + fs->tx_queue = rxq; + fs->peer_addr = fs->tx_port; + rxq = (queueid_t) (rxq + 1); + if (rxq < nb_q) + continue; + /* + * rxq == nb_q + * Restart from RX queue 0 on next RX port + */ + rxq = 0; + if (numa_support && (nb_fwd_ports <= (nb_ports >> 1))) + rxp = (portid_t) + (rxp + ((nb_ports >> 1) / nb_fwd_ports)); + else + rxp = (portid_t) (rxp + 1); + } +} + +void +fwd_config_setup(void) +{ + cur_fwd_config.fwd_eng = cur_fwd_eng; + if ((nb_rxq > 1) && (nb_txq > 1)) + rss_fwd_config_setup(); + else + simple_fwd_config_setup(); +} + +static void 
+pkt_fwd_config_display(struct fwd_config *cfg) +{ + struct fwd_stream *fs; + lcoreid_t lc_id; + streamid_t sm_id; + + printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - " + "NUMA support %s\n", + cfg->fwd_eng->fwd_mode_name, + cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams, + numa_support == 1 ? "enabled" : "disabled"); + for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) { + printf("Logical Core %u (socket %u) forwards packets on " + "%d streams:", + fwd_lcores_cpuids[lc_id], + rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), + fwd_lcores[lc_id]->stream_nb); + for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) { + fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id]; + printf("\n RX P=%d/Q=%d (socket %u) -> TX " + "P=%d/Q=%d (socket %u) ", + fs->rx_port, fs->rx_queue, + ports[fs->rx_port].socket_id, + fs->tx_port, fs->tx_queue, + ports[fs->tx_port].socket_id); + print_ethaddr("peer=", + &peer_eth_addrs[fs->peer_addr]); + } + printf("\n"); + } + printf("\n"); +} + + +void +fwd_config_display(void) +{ + fwd_config_setup(); + pkt_fwd_config_display(&cur_fwd_config); +} + +void +set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc) +{ + unsigned int i; + unsigned int lcore_cpuid; + int record_now; + + record_now = 0; + again: + for (i = 0; i < nb_lc; i++) { + lcore_cpuid = lcorelist[i]; + if (! rte_lcore_is_enabled(lcore_cpuid)) { + printf("Logical core %u not enabled\n", lcore_cpuid); + return; + } + if (lcore_cpuid == rte_get_master_lcore()) { + printf("Master core %u cannot forward packets\n", + lcore_cpuid); + return; + } + if (record_now) + fwd_lcores_cpuids[i] = lcore_cpuid; + } + if (record_now == 0) { + record_now = 1; + goto again; + } + nb_cfg_lcores = (lcoreid_t) nb_lc; + if (nb_fwd_lcores != (lcoreid_t) nb_lc) { + printf("previous number of forwarding cores %u - changed to " + "number of configured cores %u\n", + (unsigned int) nb_fwd_lcores, nb_lc); + nb_fwd_lcores = (lcoreid_t) nb_lc; + } +} + +void +set_fwd_lcores_mask(uint64_t lcoremask) +{ + unsigned int lcorelist[64]; + unsigned int nb_lc; + unsigned int i; + + if (lcoremask == 0) { + printf("Invalid NULL mask of cores\n"); + return; + } + nb_lc = 0; + for (i = 0; i < 64; i++) { + if (! ((uint64_t)(1ULL << i) & lcoremask)) + continue; + lcorelist[nb_lc++] = i; + } + set_fwd_lcores_list(lcorelist, nb_lc); +} + +void +set_fwd_lcores_number(uint16_t nb_lc) +{ + if (nb_lc > nb_cfg_lcores) { + printf("nb fwd cores %u > %u (max. 
number of configured " + "lcores) - ignored\n", + (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores); + return; + } + nb_fwd_lcores = (lcoreid_t) nb_lc; + printf("Number of forwarding cores set to %u\n", + (unsigned int) nb_fwd_lcores); +} + +void +set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt) +{ + unsigned int i; + portid_t port_id; + int record_now; + + record_now = 0; + again: + for (i = 0; i < nb_pt; i++) { + port_id = (portid_t) portlist[i]; + if (port_id >= nb_ports) { + printf("Invalid port id %u > %u\n", + (unsigned int) port_id, + (unsigned int) nb_ports); + return; + } + if (record_now) + fwd_ports_ids[i] = port_id; + } + if (record_now == 0) { + record_now = 1; + goto again; + } + nb_cfg_ports = (portid_t) nb_pt; + if (nb_fwd_ports != (portid_t) nb_pt) { + printf("previous number of forwarding ports %u - changed to " + "number of configured ports %u\n", + (unsigned int) nb_fwd_ports, nb_pt); + nb_fwd_ports = (portid_t) nb_pt; + } +} + +void +set_fwd_ports_mask(uint64_t portmask) +{ + unsigned int portlist[64]; + unsigned int nb_pt; + unsigned int i; + + if (portmask == 0) { + printf("Invalid NULL mask of ports\n"); + return; + } + nb_pt = 0; + for (i = 0; i < 64; i++) { + if (! ((uint64_t)(1ULL << i) & portmask)) + continue; + portlist[nb_pt++] = i; + } + set_fwd_ports_list(portlist, nb_pt); +} + +void +set_fwd_ports_number(uint16_t nb_pt) +{ + if (nb_pt > nb_cfg_ports) { + printf("nb fwd ports %u > %u (number of configured " + "ports) - ignored\n", + (unsigned int) nb_pt, (unsigned int) nb_cfg_ports); + return; + } + nb_fwd_ports = (portid_t) nb_pt; + printf("Number of forwarding ports set to %u\n", + (unsigned int) nb_fwd_ports); +} + +void +set_nb_pkt_per_burst(uint16_t nb) +{ + if (nb > MAX_PKT_BURST) { + printf("nb pkt per burst: %u > %u (maximum packet per burst) " + " ignored\n", + (unsigned int) nb, (unsigned int) MAX_PKT_BURST); + return; + } + nb_pkt_per_burst = nb; + printf("Number of packets per burst set to %u\n", + (unsigned int) nb_pkt_per_burst); +} + +void +set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs) +{ + uint16_t tx_pkt_len; + unsigned i; + + if (nb_segs >= (unsigned) nb_txd) { + printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n", + nb_segs, (unsigned int) nb_txd); + return; + } + + /* + * Check that each segment length is greater or equal than + * the mbuf data sise. + * Check also that the total packet length is greater or equal than the + * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8). + */ + tx_pkt_len = 0; + for (i = 0; i < nb_segs; i++) { + if (seg_lengths[i] > (unsigned) mbuf_data_size) { + printf("length[%u]=%u > mbuf_data_size=%u - give up\n", + i, seg_lengths[i], (unsigned) mbuf_data_size); + return; + } + tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]); + } + if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) { + printf("total packet length=%u < %d - give up\n", + (unsigned) tx_pkt_len, + (int)(sizeof(struct ether_hdr) + 20 + 8)); + return; + } + + for (i = 0; i < nb_segs; i++) + tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i]; + + tx_pkt_length = tx_pkt_len; + tx_pkt_nb_segs = (uint8_t) nb_segs; +} + +void +set_pkt_forwarding_mode(const char *fwd_mode_name) +{ + struct fwd_engine *fwd_eng; + unsigned i; + + i = 0; + while ((fwd_eng = fwd_engines[i]) != NULL) { + if (! 
strcmp(fwd_eng->fwd_mode_name, fwd_mode_name)) { + printf("Set %s packet forwarding mode\n", + fwd_mode_name); + cur_fwd_eng = fwd_eng; + return; + } + i++; + } + printf("Invalid %s packet forwarding mode\n", fwd_mode_name); +} + +void +set_verbose_level(uint16_t vb_level) +{ + printf("Change verbose level from %u to %u\n", + (unsigned int) verbose_level, (unsigned int) vb_level); + verbose_level = vb_level; +} + +void +rx_vlan_filter_set(portid_t port_id, uint16_t vlan_id, int on) +{ + int diag; + + if (port_id_is_invalid(port_id)) + return; + if (vlan_id_is_invalid(vlan_id)) + return; + diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on); + if (diag == 0) + return; + printf("rte_eth_dev_vlan_filter(port_pi=%d, vlan_id=%d, on=%d) failed " + "diag=%d\n", + port_id, vlan_id, on, diag); +} + +void +rx_vlan_all_filter_set(portid_t port_id, int on) +{ + uint16_t vlan_id; + + if (port_id_is_invalid(port_id)) + return; + for (vlan_id = 0; vlan_id < 4096; vlan_id++) + rx_vlan_filter_set(port_id, vlan_id, on); +} + +void +tx_vlan_set(portid_t port_id, uint16_t vlan_id) +{ + if (port_id_is_invalid(port_id)) + return; + if (vlan_id_is_invalid(vlan_id)) + return; + ports[port_id].tx_ol_flags |= PKT_TX_VLAN_PKT; + ports[port_id].tx_vlan_id = vlan_id; +} + +void +tx_vlan_reset(portid_t port_id) +{ + if (port_id_is_invalid(port_id)) + return; + ports[port_id].tx_ol_flags &= ~PKT_TX_VLAN_PKT; +} + +void +tx_cksum_set(portid_t port_id, uint8_t cksum_mask) +{ + uint16_t tx_ol_flags; + if (port_id_is_invalid(port_id)) + return; + /* Clear last 4 bits and then set L3/4 checksum mask again */ + tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0); + ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags); +} + +void +fdir_add_signature_filter(portid_t port_id, uint8_t queue_id, + struct rte_fdir_filter *fdir_filter) +{ + int diag; + + if (port_id_is_invalid(port_id)) + return; + + diag = rte_eth_dev_fdir_add_signature_filter(port_id, fdir_filter, + queue_id); + if (diag == 0) + return; + + printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed " + "diag=%d\n", port_id, diag); +} + +void +fdir_update_signature_filter(portid_t port_id, uint8_t queue_id, + struct rte_fdir_filter *fdir_filter) +{ + int diag; + + if (port_id_is_invalid(port_id)) + return; + + diag = rte_eth_dev_fdir_update_signature_filter(port_id, fdir_filter, + queue_id); + if (diag == 0) + return; + + printf("rte_eth_dev_fdir_update_signature_filter for port_id=%d failed " + "diag=%d\n", port_id, diag); +} + +void +fdir_remove_signature_filter(portid_t port_id, + struct rte_fdir_filter *fdir_filter) +{ + int diag; + + if (port_id_is_invalid(port_id)) + return; + + diag = rte_eth_dev_fdir_remove_signature_filter(port_id, fdir_filter); + if (diag == 0) + return; + + printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed " + "diag=%d\n", port_id, diag); + +} + +void +fdir_get_infos(portid_t port_id) +{ + struct rte_eth_fdir fdir_infos; + + static const char *fdir_stats_border = "########################"; + + if (port_id_is_invalid(port_id)) + return; + + rte_eth_dev_fdir_get_infos(port_id, &fdir_infos); + + printf("\n %s FDIR infos for port %-2d %s\n", + fdir_stats_border, port_id, fdir_stats_border); + + printf(" collision: %-10"PRIu64" free: %-10"PRIu64"\n" + " maxhash: %-10"PRIu64" maxlen: %-10"PRIu64"\n" + " add : %-10"PRIu64" remove : %-10"PRIu64"\n" + " f_add: %-10"PRIu64" f_remove: %-10"PRIu64"\n", + (uint64_t)(fdir_infos.collision), (uint64_t)(fdir_infos.free), + 
(uint64_t)(fdir_infos.maxhash), (uint64_t)(fdir_infos.maxlen), + fdir_infos.add, fdir_infos.remove, + fdir_infos.f_add, fdir_infos.f_remove); + printf(" %s############################%s\n", + fdir_stats_border, fdir_stats_border); +} + +void +fdir_add_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id, + uint8_t drop, struct rte_fdir_filter *fdir_filter) +{ + int diag; + + if (port_id_is_invalid(port_id)) + return; + + diag = rte_eth_dev_fdir_add_perfect_filter(port_id, fdir_filter, + soft_id, queue_id, drop); + if (diag == 0) + return; + + printf("rte_eth_dev_fdir_add_perfect_filter for port_id=%d failed " + "diag=%d\n", port_id, diag); +} + +void +fdir_update_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id, + uint8_t drop, struct rte_fdir_filter *fdir_filter) +{ + int diag; + + if (port_id_is_invalid(port_id)) + return; + + diag = rte_eth_dev_fdir_update_perfect_filter(port_id, fdir_filter, + soft_id, queue_id, drop); + if (diag == 0) + return; + + printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed " + "diag=%d\n", port_id, diag); +} + +void +fdir_remove_perfect_filter(portid_t port_id, uint16_t soft_id, + struct rte_fdir_filter *fdir_filter) +{ + int diag; + + if (port_id_is_invalid(port_id)) + return; + + diag = rte_eth_dev_fdir_remove_perfect_filter(port_id, fdir_filter, + soft_id); + if (diag == 0) + return; + + printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed " + "diag=%d\n", port_id, diag); +} + +void +fdir_set_masks(portid_t port_id, struct rte_fdir_masks *fdir_masks) +{ + int diag; + + if (port_id_is_invalid(port_id)) + return; + + diag = rte_eth_dev_fdir_set_masks(port_id, fdir_masks); + if (diag == 0) + return; + + printf("rte_eth_dev_set_masks_filter for port_id=%d failed " + "diag=%d\n", port_id, diag); +} diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c new file mode 100644 index 0000000000..7aabcde271 --- /dev/null +++ b/app/test-pmd/csumonly.c @@ -0,0 +1,449 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "testpmd.h" + + + +#define IP_DEFTTL 64 /* from RFC 1340. */ +#define IP_VERSION 0x40 +#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */ +#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN) + +/* Pseudo Header for IPv4/UDP/TCP checksum */ +struct psd_header { + uint32_t src_addr; /* IP address of source host. */ + uint32_t dst_addr; /* IP address of destination host(s). */ + uint8_t zero; /* zero. */ + uint8_t proto; /* L4 protocol type. */ + uint16_t len; /* L4 length. */ +} __attribute__((__packed__)); + + +/* Pseudo Header for IPv6/UDP/TCP checksum */ +struct ipv6_psd_header { + uint8_t src_addr[16]; /* IP address of source host. */ + uint8_t dst_addr[16]; /* IP address of destination host(s). */ + uint32_t len; /* L4 length. */ + uint8_t zero[3]; /* zero. */ + uint8_t proto; /* L4 protocol. 
*/ +} __attribute__((__packed__)); + + +static inline uint16_t +get_16b_sum(uint16_t *ptr16, uint32_t nr) +{ + uint32_t sum = 0; + while (nr > 1) + { + sum +=*ptr16; + nr -= sizeof(uint16_t); + ptr16++; + if (sum > UINT16_MAX) + sum -= UINT16_MAX; + } + + /* If length is in odd bytes */ + if (nr) + sum += *((uint8_t*)ptr16); + + sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff); + sum &= 0x0ffff; + return (uint16_t)sum; +} + +static inline uint16_t +get_ipv4_cksum(struct ipv4_hdr *ipv4_hdr) +{ + uint16_t cksum; + cksum = get_16b_sum((uint16_t*)ipv4_hdr, sizeof(struct ipv4_hdr)); + return (uint16_t)((cksum == 0xffff)?cksum:~cksum); +} + + +static inline +uint16_t get_ipv4_psd_sum (struct ipv4_hdr * ip_hdr) +{ + struct psd_header psd_hdr; + psd_hdr.src_addr = ip_hdr->src_addr; + psd_hdr.dst_addr = ip_hdr->dst_addr; + psd_hdr.zero = 0; + psd_hdr.proto = ip_hdr->next_proto_id; + psd_hdr.len = rte_cpu_to_be_16((uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) + - sizeof(struct ipv4_hdr))); + return get_16b_sum((uint16_t*)&psd_hdr, sizeof(struct psd_header)); +} + +static inline +uint16_t get_ipv6_psd_sum (struct ipv6_hdr * ip_hdr) +{ + struct ipv6_psd_header psd_hdr; + rte_memcpy(psd_hdr.src_addr, ip_hdr->src_addr, sizeof(ip_hdr->src_addr) + + sizeof(ip_hdr->dst_addr)); + + psd_hdr.zero[0] = 0; + psd_hdr.zero[1] = 0; + psd_hdr.zero[2] = 0; + psd_hdr.proto = ip_hdr->proto; + psd_hdr.len = ip_hdr->payload_len; + + return get_16b_sum((uint16_t*)&psd_hdr, sizeof(struct ipv6_psd_header)); +} + +static inline uint16_t +get_ipv4_udptcp_checksum(struct ipv4_hdr *ipv4_hdr, uint16_t *l4_hdr) +{ + uint32_t cksum; + uint32_t l4_len; + + l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - sizeof(struct ipv4_hdr); + + cksum = get_16b_sum(l4_hdr, l4_len); + cksum += get_ipv4_psd_sum(ipv4_hdr); + + cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff); + cksum = (~cksum) & 0xffff; + if (cksum == 0) + cksum = 0xffff; + return (uint16_t)cksum; + +} + +static inline uint16_t +get_ipv6_udptcp_checksum(struct ipv6_hdr *ipv6_hdr, uint16_t *l4_hdr) +{ + uint32_t cksum; + uint32_t l4_len; + + l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len); + + cksum = get_16b_sum(l4_hdr, l4_len); + cksum += get_ipv6_psd_sum(ipv6_hdr); + + cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff); + cksum = (~cksum) & 0xffff; + if (cksum == 0) + cksum = 0xffff; + + return (uint16_t)cksum; +} + + +/* + * Forwarding of packets. Change the checksum field with HW or SW methods + * The HW/SW method selection depends on the ol_flags on every packet + */ +static void +pkt_burst_checksum_forward(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_port *txp; + struct rte_mbuf *mb; + struct ether_hdr *eth_hdr; + struct ipv4_hdr *ipv4_hdr; + struct ipv6_hdr *ipv6_hdr; + struct udp_hdr *udp_hdr; + struct tcp_hdr *tcp_hdr; + struct sctp_hdr *sctp_hdr; + + uint16_t nb_rx; + uint16_t nb_tx; + uint16_t i; + uint16_t ol_flags; + uint16_t pkt_ol_flags; + uint16_t tx_ol_flags; + uint16_t l4_proto; + uint8_t l2_len; + uint8_t l3_len; + + uint32_t rx_bad_ip_csum; + uint32_t rx_bad_l4_csum; + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* + * Receive a burst of packets and forward them. 
+ */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + fs->rx_packets += nb_rx; + rx_bad_ip_csum = 0; + rx_bad_l4_csum = 0; + + txp = &ports[fs->tx_port]; + tx_ol_flags = txp->tx_ol_flags; + + for (i = 0; i < nb_rx; i++) { + + mb = pkts_burst[i]; + l2_len = sizeof(struct ether_hdr); + pkt_ol_flags = mb->ol_flags; + ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK)); + + eth_hdr = (struct ether_hdr *) mb->pkt.data; + if (rte_be_to_cpu_16(eth_hdr->ether_type) == ETHER_TYPE_VLAN) { + /* Only allow single VLAN label here */ + l2_len += sizeof(struct vlan_hdr); + } + + /* Update the L3/L4 checksum error packet count */ + rx_bad_ip_csum += (uint16_t) ((pkt_ol_flags & PKT_RX_IP_CKSUM_BAD) != 0); + rx_bad_l4_csum += (uint16_t) ((pkt_ol_flags & PKT_RX_L4_CKSUM_BAD) != 0); + + /* + * Simplify the protocol parsing + * Assuming the incoming packets format as + * Ethernet2 + optional single VLAN + * + ipv4 or ipv6 + * + udp or tcp or sctp or others + */ + if (pkt_ol_flags & PKT_RX_IPV4_HDR) { + + /* Do not support ipv4 option field */ + l3_len = sizeof(struct ipv4_hdr) ; + + ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb, + unsigned char *) + l2_len); + + l4_proto = ipv4_hdr->next_proto_id; + + /* Do not delete, this is required by HW*/ + ipv4_hdr->hdr_checksum = 0; + + if (tx_ol_flags & 0x1) { + /* HW checksum */ + ol_flags |= PKT_TX_IP_CKSUM; + } + else { + /* SW checksum calculation */ + ipv4_hdr->src_addr++; + ipv4_hdr->hdr_checksum = get_ipv4_cksum(ipv4_hdr); + } + + if (l4_proto == IPPROTO_UDP) { + udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb, + unsigned char *) + l2_len + l3_len); + if (tx_ol_flags & 0x2) { + /* HW Offload */ + ol_flags |= PKT_TX_UDP_CKSUM; + /* Pseudo header sum need be set properly */ + udp_hdr->dgram_cksum = get_ipv4_psd_sum(ipv4_hdr); + } + else { + /* SW Implementation, clear checksum field first */ + udp_hdr->dgram_cksum = 0; + udp_hdr->dgram_cksum = get_ipv4_udptcp_checksum(ipv4_hdr, + (uint16_t*)udp_hdr); + } + } + else if (l4_proto == IPPROTO_TCP){ + tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb, + unsigned char *) + l2_len + l3_len); + if (tx_ol_flags & 0x4) { + ol_flags |= PKT_TX_TCP_CKSUM; + tcp_hdr->cksum = get_ipv4_psd_sum(ipv4_hdr); + } + else { + tcp_hdr->cksum = 0; + tcp_hdr->cksum = get_ipv4_udptcp_checksum(ipv4_hdr, + (uint16_t*)tcp_hdr); + } + } + else if (l4_proto == IPPROTO_SCTP) { + sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb, + unsigned char *) + l2_len + l3_len); + + if (tx_ol_flags & 0x8) { + ol_flags |= PKT_TX_SCTP_CKSUM; + sctp_hdr->cksum = 0; + + /* Sanity check, only number of 4 bytes supported */ + if ((rte_be_to_cpu_16(ipv4_hdr->total_length) % 4) != 0) + printf("sctp payload must be a multiple " + "of 4 bytes for checksum offload"); + } + else { + sctp_hdr->cksum = 0; + /* CRC32c sample code available in RFC3309 */ + } + } + /* End of L4 Handling*/ + } + + else { + ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb, + unsigned char *) + l2_len); + l3_len = sizeof(struct ipv6_hdr) ; + l4_proto = ipv6_hdr->proto; + + if (l4_proto == IPPROTO_UDP) { + udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb, + unsigned char *) + l2_len + l3_len); + if (tx_ol_flags & 0x2) { + /* HW Offload */ + ol_flags |= PKT_TX_UDP_CKSUM; + udp_hdr->dgram_cksum = get_ipv6_psd_sum(ipv6_hdr); + } + else { + /* SW Implementation */ + /* checksum field need be clear first */ + 
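get_16b_sum(), get_ipv4_cksum() and the pseudo-header helpers earlier in this file implement the usual 16-bit one's-complement Internet checksum in software. The sketch below is a self-contained version of that folding arithmetic (not a copy of get_16b_sum()), verified against a sample IPv4 header (192.168.0.1 -> 192.168.0.199) whose stored checksum makes the total sum come out to 0xFFFF:

    #include <stdint.h>
    #include <stdio.h>

    /* 16-bit one's-complement sum over a buffer in network byte order. */
    static uint16_t
    ones_complement_sum(const uint8_t *buf, size_t len)
    {
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
            sum += (uint32_t)(buf[i] << 8 | buf[i + 1]);
        if (len & 1)                    /* odd trailing byte */
            sum += (uint32_t)(buf[len - 1] << 8);
        while (sum >> 16)               /* fold carries back in */
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    int main(void)
    {
        /* IPv4 header with its checksum field (0xb861) already filled in;
         * a valid header sums to 0xFFFF. */
        uint8_t hdr[20] = {
            0x45, 0x00, 0x00, 0x73, 0x00, 0x00, 0x40, 0x00,
            0x40, 0x11, 0xb8, 0x61, 0xc0, 0xa8, 0x00, 0x01,
            0xc0, 0xa8, 0x00, 0xc7
        };
        printf("checksum valid: %s\n",
               ones_complement_sum(hdr, sizeof(hdr)) == 0xffff ? "yes" : "no");
        return 0;
    }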
udp_hdr->dgram_cksum = 0; + udp_hdr->dgram_cksum = get_ipv6_udptcp_checksum(ipv6_hdr, + (uint16_t*)udp_hdr); + } + } + else if (l4_proto == IPPROTO_TCP) { + tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb, + unsigned char *) + l2_len + l3_len); + if (tx_ol_flags & 0x4) { + ol_flags |= PKT_TX_TCP_CKSUM; + tcp_hdr->cksum = get_ipv6_psd_sum(ipv6_hdr); + } + else { + tcp_hdr->cksum = 0; + tcp_hdr->cksum = get_ipv6_udptcp_checksum(ipv6_hdr, + (uint16_t*)tcp_hdr); + } + } + else if (l4_proto == IPPROTO_SCTP) { + sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb, + unsigned char *) + l2_len + l3_len); + + if (tx_ol_flags & 0x8) { + ol_flags |= PKT_TX_SCTP_CKSUM; + sctp_hdr->cksum = 0; + /* Sanity check, only number of 4 bytes supported by HW */ + if ((rte_be_to_cpu_16(ipv6_hdr->payload_len) % 4) != 0) + printf("sctp payload must be a multiple " + "of 4 bytes for checksum offload"); + } + else { + /* CRC32c sample code available in RFC3309 */ + sctp_hdr->cksum = 0; + } + } else { + printf("Test flow control for 1G PMD \n"); + } + /* End of L4 Handling*/ + } + + /* Combine the packet header write. VLAN is not consider here */ + mb->pkt.l2_len = l2_len; + mb->pkt.l3_len = l3_len; + mb->ol_flags = ol_flags; + } + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx); + fs->tx_packets += nb_tx; + fs->rx_bad_ip_csum += rx_bad_ip_csum; + fs->rx_bad_l4_csum += rx_bad_l4_csum; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_rx)) { + fs->fwd_dropped += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + + +struct fwd_engine csum_fwd_engine = { + .fwd_mode_name = "csum", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_checksum_forward, +}; + diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c new file mode 100644 index 0000000000..1fbc55463d --- /dev/null +++ b/app/test-pmd/ieee1588fwd.c @@ -0,0 +1,657 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +/** + * The structure of a PTP V2 packet. + * + * Only the minimum fields used by the ieee1588 test are represented. + */ +struct ptpv2_msg { + uint8_t msg_id; + uint8_t version; /**< must be 0x02 */ + uint8_t unused[34]; +}; +#define PTP_SYNC_MESSAGE 0x0 +#define PTP_DELAY_REQ_MESSAGE 0x1 +#define PTP_PATH_DELAY_REQ_MESSAGE 0x2 +#define PTP_PATH_DELAY_RESP_MESSAGE 0x3 +#define PTP_FOLLOWUP_MESSAGE 0x8 +#define PTP_DELAY_RESP_MESSAGE 0x9 +#define PTP_PATH_DELAY_FOLLOWUP_MESSAGE 0xA +#define PTP_ANNOUNCE_MESSAGE 0xB +#define PTP_SIGNALLING_MESSAGE 0xC +#define PTP_MANAGEMENT_MESSAGE 0xD + +/* + * Forwarding of IEEE1588 Precise Time Protocol (PTP) packets. + * + * In this mode, packets are received one by one and are expected to be + * PTP V2 L2 Ethernet frames (with the specific Ethernet type "0x88F7") + * containing PTP "sync" messages (version 2 at offset 1, and message ID + * 0 at offset 0). + * + * Check that each received packet is a IEEE1588 PTP V2 packet of type + * PTP_SYNC_MESSAGE, and that it has been identified and timestamped + * by the hardware. + * Check that the value of the last RX timestamp recorded by the controller + * is greater than the previous one. + * + * If everything is OK, send the received packet back on the same port, + * requesting for it to be timestamped by the hardware. + * Check that the value of the last TX timestamp recorded by the controller + * is greater than the previous one. 
+ */ + +/* + * 1GbE 82576 Kawela registers used for IEEE1588 hardware support + */ +#define IGBE_82576_ETQF(n) (0x05CB0 + (4 * (n))) +#define IGBE_82576_ETQF_FILTER_ENABLE (1 << 26) +#define IGBE_82576_ETQF_1588_TIMESTAMP (1 << 30) + +#define IGBE_82576_TSYNCRXCTL 0x0B620 +#define IGBE_82576_TSYNCRXCTL_RXTS_ENABLE (1 << 4) + +#define IGBE_82576_RXSTMPL 0x0B624 +#define IGBE_82576_RXSTMPH 0x0B628 +#define IGBE_82576_RXSATRL 0x0B62C +#define IGBE_82576_RXSATRH 0x0B630 +#define IGBE_82576_TSYNCTXCTL 0x0B614 +#define IGBE_82576_TSYNCTXCTL_TXTS_ENABLE (1 << 4) + +#define IGBE_82576_TXSTMPL 0x0B618 +#define IGBE_82576_TXSTMPH 0x0B61C +#define IGBE_82576_SYSTIML 0x0B600 +#define IGBE_82576_SYSTIMH 0x0B604 +#define IGBE_82576_TIMINCA 0x0B608 +#define IGBE_82576_TIMADJL 0x0B60C +#define IGBE_82576_TIMADJH 0x0B610 +#define IGBE_82576_TSAUXC 0x0B640 +#define IGBE_82576_TRGTTIML0 0x0B644 +#define IGBE_82576_TRGTTIMH0 0x0B648 +#define IGBE_82576_TRGTTIML1 0x0B64C +#define IGBE_82576_TRGTTIMH1 0x0B650 +#define IGBE_82576_AUXSTMPL0 0x0B65C +#define IGBE_82576_AUXSTMPH0 0x0B660 +#define IGBE_82576_AUXSTMPL1 0x0B664 +#define IGBE_82576_AUXSTMPH1 0x0B668 +#define IGBE_82576_TSYNCRXCFG 0x05F50 +#define IGBE_82576_TSSDP 0x0003C + +/* + * 10GbE 82599 Niantic registers used for IEEE1588 hardware support + */ +#define IXGBE_82599_ETQF(n) (0x05128 + (4 * (n))) +#define IXGBE_82599_ETQF_FILTER_ENABLE (1 << 31) +#define IXGBE_82599_ETQF_1588_TIMESTAMP (1 << 30) + +#define IXGBE_82599_TSYNCRXCTL 0x05188 +#define IXGBE_82599_TSYNCRXCTL_RXTS_ENABLE (1 << 4) + +#define IXGBE_82599_RXSTMPL 0x051E8 +#define IXGBE_82599_RXSTMPH 0x051A4 +#define IXGBE_82599_RXSATRL 0x051A0 +#define IXGBE_82599_RXSATRH 0x051A8 +#define IXGBE_82599_RXMTRL 0x05120 +#define IXGBE_82599_TSYNCTXCTL 0x08C00 +#define IXGBE_82599_TSYNCTXCTL_TXTS_ENABLE (1 << 4) + +#define IXGBE_82599_TXSTMPL 0x08C04 +#define IXGBE_82599_TXSTMPH 0x08C08 +#define IXGBE_82599_SYSTIML 0x08C0C +#define IXGBE_82599_SYSTIMH 0x08C10 +#define IXGBE_82599_TIMINCA 0x08C14 +#define IXGBE_82599_TIMADJL 0x08C18 +#define IXGBE_82599_TIMADJH 0x08C1C +#define IXGBE_82599_TSAUXC 0x08C20 +#define IXGBE_82599_TRGTTIML0 0x08C24 +#define IXGBE_82599_TRGTTIMH0 0x08C28 +#define IXGBE_82599_TRGTTIML1 0x08C2C +#define IXGBE_82599_TRGTTIMH1 0x08C30 +#define IXGBE_82599_AUXSTMPL0 0x08C3C +#define IXGBE_82599_AUXSTMPH0 0x08C40 +#define IXGBE_82599_AUXSTMPL1 0x08C44 +#define IXGBE_82599_AUXSTMPH1 0x08C48 + +/** + * Mandatory ETQF register for IEEE1588 packets filter. + */ +#define ETQF_FILTER_1588_REG 3 + +/** + * Recommended value for increment and period of + * the Increment Attribute Register. + */ +#define IEEE1588_TIMINCA_INIT ((0x02 << 24) | 0x00F42400) + +/** + * Data structure with pointers to port-specific functions. + */ +typedef void (*ieee1588_start_t)(portid_t pi); /**< Start IEEE1588 feature. */ +typedef void (*ieee1588_stop_t)(portid_t pi); /**< Stop IEEE1588 feature. */ +typedef int (*tmst_read_t)(portid_t pi, uint64_t *tmst); /**< Read TMST regs */ + +struct port_ieee1588_ops { + ieee1588_start_t ieee1588_start; + ieee1588_stop_t ieee1588_stop; + tmst_read_t rx_tmst_read; + tmst_read_t tx_tmst_read; +}; + +/** + * 1GbE 82576 IEEE1588 operations. + */ +static void +igbe_82576_ieee1588_start(portid_t pi) +{ + uint32_t tsync_ctl; + + /* + * Start incrementation of the System Time registers used to + * timestamp PTP packets. 
+ */ + port_id_pci_reg_write(pi, IGBE_82576_TIMINCA, IEEE1588_TIMINCA_INIT); + port_id_pci_reg_write(pi, IGBE_82576_TSAUXC, 0); + + /* + * Enable L2 filtering of IEEE1588 Ethernet frame types. + */ + port_id_pci_reg_write(pi, IGBE_82576_ETQF(ETQF_FILTER_1588_REG), + (ETHER_TYPE_1588 | + IGBE_82576_ETQF_FILTER_ENABLE | + IGBE_82576_ETQF_1588_TIMESTAMP)); + + /* + * Enable timestamping of received PTP packets. + */ + tsync_ctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCRXCTL); + tsync_ctl |= IGBE_82576_TSYNCRXCTL_RXTS_ENABLE; + port_id_pci_reg_write(pi, IGBE_82576_TSYNCRXCTL, tsync_ctl); + + /* + * Enable Timestamping of transmitted PTP packets. + */ + tsync_ctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCTXCTL); + tsync_ctl |= IGBE_82576_TSYNCTXCTL_TXTS_ENABLE; + port_id_pci_reg_write(pi, IGBE_82576_TSYNCTXCTL, tsync_ctl); +} + +static void +igbe_82576_ieee1588_stop(portid_t pi) +{ + uint32_t tsync_ctl; + + /* + * Disable Timestamping of transmitted PTP packets. + */ + tsync_ctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCTXCTL); + tsync_ctl &= ~IGBE_82576_TSYNCTXCTL_TXTS_ENABLE; + port_id_pci_reg_write(pi, IGBE_82576_TSYNCTXCTL, tsync_ctl); + + /* + * Disable timestamping of received PTP packets. + */ + tsync_ctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCRXCTL); + tsync_ctl &= ~IGBE_82576_TSYNCRXCTL_RXTS_ENABLE; + port_id_pci_reg_write(pi, IGBE_82576_TSYNCRXCTL, tsync_ctl); + + /* + * Disable L2 filtering of IEEE1588 Ethernet types. + */ + port_id_pci_reg_write(pi, IGBE_82576_ETQF(ETQF_FILTER_1588_REG), 0); + + /* + * Stop incrementation of the System Time registers. + */ + port_id_pci_reg_write(pi, IGBE_82576_TIMINCA, 0); +} + +/** + * Return the 64-bit value contained in the RX IEEE1588 timestamp registers + * of a 1GbE 82576 port. + * + * @param pi + * The port identifier. + * + * @param tmst + * The address of a 64-bit variable to return the value of the RX timestamp. + * + * @return + * -1: the RXSTMPL and RXSTMPH registers of the port are not valid. + * 0: the variable pointed to by the "tmst" parameter contains the value + * of the RXSTMPL and RXSTMPH registers of the port. + */ +static int +igbe_82576_rx_timestamp_read(portid_t pi, uint64_t *tmst) +{ + uint32_t tsync_rxctl; + uint32_t rx_stmpl; + uint32_t rx_stmph; + + tsync_rxctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCRXCTL); + if ((tsync_rxctl & 0x01) == 0) + return (-1); + + rx_stmpl = port_id_pci_reg_read(pi, IGBE_82576_RXSTMPL); + rx_stmph = port_id_pci_reg_read(pi, IGBE_82576_RXSTMPH); + *tmst = (uint64_t)(((uint64_t) rx_stmph << 32) | rx_stmpl); + return (0); +} + +/** + * Return the 64-bit value contained in the TX IEEE1588 timestamp registers + * of a 1GbE 82576 port. + * + * @param pi + * The port identifier. + * + * @param tmst + * The address of a 64-bit variable to return the value of the TX timestamp. + * + * @return + * -1: the TXSTMPL and TXSTMPH registers of the port are not valid. + * 0: the variable pointed to by the "tmst" parameter contains the value + * of the TXSTMPL and TXSTMPH registers of the port. 
+ */ +static int +igbe_82576_tx_timestamp_read(portid_t pi, uint64_t *tmst) +{ + uint32_t tsync_txctl; + uint32_t tx_stmpl; + uint32_t tx_stmph; + + tsync_txctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCTXCTL); + if ((tsync_txctl & 0x01) == 0) + return (-1); + + tx_stmpl = port_id_pci_reg_read(pi, IGBE_82576_TXSTMPL); + tx_stmph = port_id_pci_reg_read(pi, IGBE_82576_TXSTMPH); + *tmst = (uint64_t)(((uint64_t) tx_stmph << 32) | tx_stmpl); + return (0); +} + +static struct port_ieee1588_ops igbe_82576_ieee1588_ops = { + .ieee1588_start = igbe_82576_ieee1588_start, + .ieee1588_stop = igbe_82576_ieee1588_stop, + .rx_tmst_read = igbe_82576_rx_timestamp_read, + .tx_tmst_read = igbe_82576_tx_timestamp_read, +}; + +/** + * 10GbE 82599 IEEE1588 operations. + */ +static void +ixgbe_82599_ieee1588_start(portid_t pi) +{ + uint32_t tsync_ctl; + + /* + * Start incrementation of the System Time registers used to + * timestamp PTP packets. + */ + port_id_pci_reg_write(pi, IXGBE_82599_TIMINCA, IEEE1588_TIMINCA_INIT); + + /* + * Enable L2 filtering of IEEE1588 Ethernet frame types. + */ + port_id_pci_reg_write(pi, IXGBE_82599_ETQF(ETQF_FILTER_1588_REG), + (ETHER_TYPE_1588 | + IXGBE_82599_ETQF_FILTER_ENABLE | + IXGBE_82599_ETQF_1588_TIMESTAMP)); + + /* + * Enable timestamping of received PTP packets. + */ + tsync_ctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCRXCTL); + tsync_ctl |= IXGBE_82599_TSYNCRXCTL_RXTS_ENABLE; + port_id_pci_reg_write(pi, IXGBE_82599_TSYNCRXCTL, tsync_ctl); + + /* + * Enable Timestamping of transmitted PTP packets. + */ + tsync_ctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCTXCTL); + tsync_ctl |= IXGBE_82599_TSYNCTXCTL_TXTS_ENABLE; + port_id_pci_reg_write(pi, IXGBE_82599_TSYNCTXCTL, tsync_ctl); +} + +static void +ixgbe_82599_ieee1588_stop(portid_t pi) +{ + uint32_t tsync_ctl; + + /* + * Disable Timestamping of transmitted PTP packets. + */ + tsync_ctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCTXCTL); + tsync_ctl &= ~IXGBE_82599_TSYNCTXCTL_TXTS_ENABLE; + port_id_pci_reg_write(pi, IXGBE_82599_TSYNCTXCTL, tsync_ctl); + + /* + * Disable timestamping of received PTP packets. + */ + tsync_ctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCRXCTL); + tsync_ctl &= ~IXGBE_82599_TSYNCRXCTL_RXTS_ENABLE; + port_id_pci_reg_write(pi, IXGBE_82599_TSYNCRXCTL, tsync_ctl); + + /* + * Disable L2 filtering of IEEE1588 Ethernet frame types. + */ + port_id_pci_reg_write(pi, IXGBE_82599_ETQF(ETQF_FILTER_1588_REG), 0); + + /* + * Stop incrementation of the System Time registers. + */ + port_id_pci_reg_write(pi, IXGBE_82599_TIMINCA, 0); +} + +/** + * Return the 64-bit value contained in the RX IEEE1588 timestamp registers + * of a 10GbE 82599 port. + * + * @param pi + * The port identifier. + * + * @param tmst + * The address of a 64-bit variable to return the value of the TX timestamp. + * + * @return + * -1: the RX timestamp registers of the port are not valid. + * 0: the variable pointed to by the "tmst" parameter contains the value + * of the RXSTMPL and RXSTMPH registers of the port. 
+ */ +static int +ixgbe_82599_rx_timestamp_read(portid_t pi, uint64_t *tmst) +{ + uint32_t tsync_rxctl; + uint32_t rx_stmpl; + uint32_t rx_stmph; + + tsync_rxctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCRXCTL); + if ((tsync_rxctl & 0x01) == 0) + return (-1); + + rx_stmpl = port_id_pci_reg_read(pi, IXGBE_82599_RXSTMPL); + rx_stmph = port_id_pci_reg_read(pi, IXGBE_82599_RXSTMPH); + *tmst = (uint64_t)(((uint64_t) rx_stmph << 32) | rx_stmpl); + return (0); +} + +/** + * Return the 64-bit value contained in the TX IEEE1588 timestamp registers + * of a 10GbE 82599 port. + * + * @param pi + * The port identifier. + * + * @param tmst + * The address of a 64-bit variable to return the value of the TX timestamp. + * + * @return + * -1: the TXSTMPL and TXSTMPH registers of the port are not valid. + * 0: the variable pointed to by the "tmst" parameter contains the value + * of the TXSTMPL and TXSTMPH registers of the port. + */ +static int +ixgbe_82599_tx_timestamp_read(portid_t pi, uint64_t *tmst) +{ + uint32_t tsync_txctl; + uint32_t tx_stmpl; + uint32_t tx_stmph; + + tsync_txctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCTXCTL); + if ((tsync_txctl & 0x01) == 0) + return (-1); + + tx_stmpl = port_id_pci_reg_read(pi, IXGBE_82599_TXSTMPL); + tx_stmph = port_id_pci_reg_read(pi, IXGBE_82599_TXSTMPH); + *tmst = (uint64_t)(((uint64_t) tx_stmph << 32) | tx_stmpl); + return (0); +} + +static struct port_ieee1588_ops ixgbe_82599_ieee1588_ops = { + .ieee1588_start = ixgbe_82599_ieee1588_start, + .ieee1588_stop = ixgbe_82599_ieee1588_stop, + .rx_tmst_read = ixgbe_82599_rx_timestamp_read, + .tx_tmst_read = ixgbe_82599_tx_timestamp_read, +}; + +static void +port_ieee1588_rx_timestamp_check(portid_t pi) +{ + struct port_ieee1588_ops *ieee_ops; + uint64_t rx_tmst; + + ieee_ops = (struct port_ieee1588_ops *)ports[pi].fwd_ctx; + if (ieee_ops->rx_tmst_read(pi, &rx_tmst) < 0) { + printf("Port %u: RX timestamp registers not valid\n", + (unsigned) pi); + return; + } + printf("Port %u RX timestamp value 0x%"PRIu64"\n", + (unsigned) pi, rx_tmst); +} + +#define MAX_TX_TMST_WAIT_MICROSECS 1000 /**< 1 milli-second */ + +static void +port_ieee1588_tx_timestamp_check(portid_t pi) +{ + struct port_ieee1588_ops *ieee_ops; + uint64_t tx_tmst; + unsigned wait_us; + + ieee_ops = (struct port_ieee1588_ops *)ports[pi].fwd_ctx; + wait_us = 0; + while ((ieee_ops->tx_tmst_read(pi, &tx_tmst) < 0) && + (wait_us < MAX_TX_TMST_WAIT_MICROSECS)) { + rte_delay_us(1); + wait_us++; + } + if (wait_us >= MAX_TX_TMST_WAIT_MICROSECS) { + printf("Port %u: TX timestamp registers not valid after" + "%u micro-seconds\n", + (unsigned) pi, (unsigned) MAX_TX_TMST_WAIT_MICROSECS); + return; + } + printf("Port %u TX timestamp value 0x%"PRIu64" validated after " + "%u micro-second%s\n", + (unsigned) pi, tx_tmst, wait_us, + (wait_us == 1) ? "" : "s"); +} + +static void +ieee1588_packet_fwd(struct fwd_stream *fs) +{ + struct rte_mbuf *mb; + struct ether_hdr *eth_hdr; + struct ptpv2_msg *ptp_hdr; + uint16_t eth_type; + + /* + * Receive 1 packet at a time. + */ + if (rte_eth_rx_burst(fs->rx_port, fs->rx_queue, &mb, 1) == 0) + return; + + fs->rx_packets += 1; + + /* + * Check that the received packet is a PTP packet that was detected + * by the hardware. + */ + eth_hdr = (struct ether_hdr *)mb->pkt.data; + eth_type = rte_be_to_cpu_16(eth_hdr->ether_type); + if (! 
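ieee1588_packet_fwd(), which begins just above, only forwards a frame when three conditions hold: Ethernet type 0x88F7, PTP version field 0x02, and message ID 0 (Sync). A standalone restatement of that acceptance test on a stack-built header, independent of mbufs and hardware offload flags:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ETHER_TYPE_1588   0x88F7  /* PTP over Ethernet (IEEE1588 v2) */
    #define PTP_SYNC_MESSAGE  0x0

    /* Minimal view of a PTP V2 header, as declared in ieee1588fwd.c. */
    struct ptpv2_msg {
        uint8_t msg_id;     /* 0 = Sync */
        uint8_t version;    /* must be 0x02 */
        uint8_t unused[34];
    };

    /* Same acceptance test as ieee1588_packet_fwd(): only PTP V2 Sync
     * frames are forwarded, everything else is dropped. */
    static int
    is_ptp_v2_sync(uint16_t eth_type, const struct ptpv2_msg *ptp)
    {
        return eth_type == ETHER_TYPE_1588 &&
               ptp->version == 0x02 &&
               ptp->msg_id == PTP_SYNC_MESSAGE;
    }

    int main(void)
    {
        struct ptpv2_msg sync;

        memset(&sync, 0, sizeof(sync));
        sync.msg_id = PTP_SYNC_MESSAGE;
        sync.version = 0x02;
        printf("%d\n", is_ptp_v2_sync(ETHER_TYPE_1588, &sync));  /* 1 */
        printf("%d\n", is_ptp_v2_sync(0x0800, &sync));           /* 0 */
        return 0;
    }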
(mb->ol_flags & PKT_RX_IEEE1588_PTP)) { + if (eth_type == ETHER_TYPE_1588) { + printf("Port %u Received PTP packet not filtered" + " by hardware\n", + (unsigned) fs->rx_port); + } else { + printf("Port %u Received non PTP packet type=0x%4x " + "len=%u\n", + (unsigned) fs->rx_port, eth_type, + (unsigned) mb->pkt.pkt_len); + } + rte_pktmbuf_free(mb); + return; + } + if (eth_type != ETHER_TYPE_1588) { + printf("Port %u Received NON PTP packet wrongly" + " detected by hardware\n", + (unsigned) fs->rx_port); + rte_pktmbuf_free(mb); + return; + } + + /* + * Check that the received PTP packet is a PTP V2 packet of type + * PTP_SYNC_MESSAGE. + */ + ptp_hdr = (struct ptpv2_msg *) ((char *) mb->pkt.data + + sizeof(struct ether_hdr)); + if (ptp_hdr->version != 0x02) { + printf("Port %u Received PTP V2 Ethernet frame with wrong PTP" + " protocol version 0x%x (should be 0x02)\n", + (unsigned) fs->rx_port, ptp_hdr->version); + rte_pktmbuf_free(mb); + return; + } + if (ptp_hdr->msg_id != PTP_SYNC_MESSAGE) { + printf("Port %u Received PTP V2 Ethernet frame with unexpected" + " messageID 0x%x (expected 0x0 - PTP_SYNC_MESSAGE)\n", + (unsigned) fs->rx_port, ptp_hdr->msg_id); + rte_pktmbuf_free(mb); + return; + } + printf("Port %u IEEE1588 PTP V2 SYNC Message filtered by hardware\n", + (unsigned) fs->rx_port); + + /* + * Check that the received PTP packet has been timestamped by the + * hardware. + */ + if (! (mb->ol_flags & PKT_RX_IEEE1588_TMST)) { + printf("Port %u Received PTP packet not timestamped" + " by hardware\n", + (unsigned) fs->rx_port); + rte_pktmbuf_free(mb); + return; + } + + /* Check the RX timestamp */ + port_ieee1588_rx_timestamp_check(fs->rx_port); + + /* Forward PTP packet with hardware TX timestamp */ + mb->ol_flags |= PKT_TX_IEEE1588_TMST; + fs->tx_packets += 1; + if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) { + printf("Port %u sent PTP packet dropped\n", + (unsigned) fs->rx_port); + fs->fwd_dropped += 1; + rte_pktmbuf_free(mb); + return; + } + + /* + * Check the TX timestamp. + */ + port_ieee1588_tx_timestamp_check(fs->rx_port); +} + +static void +port_ieee1588_fwd_begin(portid_t pi) +{ + struct port_ieee1588_ops *ieee_ops; + + if (strcmp(ports[pi].dev_info.driver_name, "rte_igb_pmd") == 0) + ieee_ops = &igbe_82576_ieee1588_ops; + else + ieee_ops = &ixgbe_82599_ieee1588_ops; + ports[pi].fwd_ctx = ieee_ops; + (ieee_ops->ieee1588_start)(pi); +} + +static void +port_ieee1588_fwd_end(portid_t pi) +{ + struct port_ieee1588_ops *ieee_ops; + + ieee_ops = (struct port_ieee1588_ops *)ports[pi].fwd_ctx; + (ieee_ops->ieee1588_stop)(pi); +} + +struct fwd_engine ieee1588_fwd_engine = { + .fwd_mode_name = "ieee1588", + .port_fwd_begin = port_ieee1588_fwd_begin, + .port_fwd_end = port_ieee1588_fwd_end, + .packet_fwd = ieee1588_packet_fwd, +}; diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c new file mode 100644 index 0000000000..3f29f6dd5e --- /dev/null +++ b/app/test-pmd/iofwd.c @@ -0,0 +1,131 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
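The RX/TX timestamp helpers above follow the same pattern on both the 82576 and the 82599: check a "timestamp valid" bit in the TSYNC control register, then read the low and high halves and splice the two 32-bit registers into one 64-bit value. Below is a minimal, self-contained sketch of that pattern; reg_read() and the register offsets are placeholders for port_id_pci_reg_read() and the real device map, not actual hardware addresses.

#include <stdint.h>
#include <stdio.h>

/* Placeholder for a 32-bit PCI register read; returns canned values so
 * the sketch runs without hardware. */
static uint32_t reg_read(uint32_t offset)
{
	switch (offset) {
	case 0x00: return 0x1;        /* control register: valid bit set */
	case 0x04: return 0xDEADBEEF; /* timestamp low half */
	case 0x08: return 0x12345678; /* timestamp high half */
	default:   return 0;
	}
}

/* Combine the split timestamp exactly as the helpers above do:
 * fail if the valid bit is clear, otherwise (high << 32) | low. */
static int read_split_timestamp(uint32_t ctl_off, uint32_t lo_off,
				uint32_t hi_off, uint64_t *tmst)
{
	uint32_t lo, hi;

	if ((reg_read(ctl_off) & 0x01) == 0)
		return -1;
	lo = reg_read(lo_off);
	hi = reg_read(hi_off);
	*tmst = ((uint64_t)hi << 32) | lo;
	return 0;
}

int main(void)
{
	uint64_t ts;

	if (read_split_timestamp(0x00, 0x04, 0x08, &ts) == 0)
		printf("timestamp = 0x%016llx\n", (unsigned long long)ts);
	return 0;
}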
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +/* + * Forwarding of packets in I/O mode. + * Forward packets "as-is". + * This is the fastest possible forwarding operation, as it does not access + * to packets data. + */ +static void +pkt_burst_io_forward(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + uint16_t nb_rx; + uint16_t nb_tx; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* + * Receive a burst of packets and forward them. + */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + fs->rx_packets += nb_rx; + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx); + fs->tx_packets += nb_tx; +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_rx)) { + fs->fwd_dropped += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine io_fwd_engine = { + .fwd_mode_name = "io", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_io_forward, +}; diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c new file mode 100644 index 0000000000..8f31e053dc --- /dev/null +++ b/app/test-pmd/macfwd.c @@ -0,0 +1,148 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
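io_fwd_engine above shows the smallest possible forwarding engine: a mode name, optional per-port begin/end hooks, and a mandatory packet_fwd callback operating on a struct fwd_stream. The sketch below mirrors that registration pattern in isolation; the fwd_stream and fwd_engine types here are simplified stand-ins for the definitions in testpmd.h, and the "noop" engine is purely illustrative.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the testpmd.h types. */
struct fwd_stream { unsigned rx_port, tx_port; uint64_t rx_packets; };

typedef void (*packet_fwd_t)(struct fwd_stream *fs);
typedef void (*port_fwd_begin_t)(unsigned pi);
typedef void (*port_fwd_end_t)(unsigned pi);

struct fwd_engine {
	const char       *fwd_mode_name;
	port_fwd_begin_t  port_fwd_begin; /* optional, may be NULL */
	port_fwd_end_t    port_fwd_end;   /* optional, may be NULL */
	packet_fwd_t      packet_fwd;     /* mandatory */
};

/* A do-nothing engine body, standing in for pkt_burst_io_forward(). */
static void noop_forward(struct fwd_stream *fs)
{
	fs->rx_packets++; /* a real engine would rx_burst/tx_burst here */
}

static struct fwd_engine noop_engine = {
	.fwd_mode_name  = "noop",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = noop_forward,
};

/* NULL-terminated table, like fwd_engines[] in testpmd.c. */
static struct fwd_engine *engines[] = { &noop_engine, NULL };

int main(void)
{
	struct fwd_stream fs = { .rx_port = 0, .tx_port = 1 };
	unsigned i;

	for (i = 0; engines[i] != NULL; i++) {
		printf("running engine \"%s\"\n", engines[i]->fwd_mode_name);
		if (engines[i]->port_fwd_begin != NULL)
			engines[i]->port_fwd_begin(fs.rx_port);
		engines[i]->packet_fwd(&fs);
		if (engines[i]->port_fwd_end != NULL)
			engines[i]->port_fwd_end(fs.rx_port);
	}
	return 0;
}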
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +/* + * Forwarding of packets in MAC mode. + * Change the source and the destination Ethernet addressed of packets + * before forwarding them. + */ +static void +pkt_burst_mac_forward(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_port *txp; + struct rte_mbuf *mb; + struct ether_hdr *eth_hdr; + uint16_t nb_rx; + uint16_t nb_tx; + uint16_t i; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* + * Receive a burst of packets and forward them. 
+ */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + fs->rx_packets += nb_rx; + txp = &ports[fs->tx_port]; + for (i = 0; i < nb_rx; i++) { + mb = pkts_burst[i]; + eth_hdr = (struct ether_hdr *) mb->pkt.data; + ether_addr_copy(&peer_eth_addrs[fs->peer_addr], + ð_hdr->d_addr); + ether_addr_copy(&ports[fs->tx_port].eth_addr, + ð_hdr->s_addr); + mb->ol_flags = txp->tx_ol_flags; + mb->pkt.l2_len = sizeof(struct ether_hdr); + mb->pkt.l3_len = sizeof(struct ipv4_hdr); + mb->pkt.vlan_tci = txp->tx_vlan_id; + } + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx); + fs->tx_packets += nb_tx; +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_rx)) { + fs->fwd_dropped += (nb_rx - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_rx); + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine mac_fwd_engine = { + .fwd_mode_name = "mac", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_mac_forward, +}; diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c new file mode 100644 index 0000000000..4c559efb25 --- /dev/null +++ b/app/test-pmd/parameters.c @@ -0,0 +1,646 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
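pkt_burst_mac_forward() rewrites the destination address of every received frame to the configured peer address and the source address to the transmitting port's own address before handing the burst back to rte_eth_tx_burst(). The self-contained sketch below performs the same header rewrite on a raw buffer; the 6-byte address layout and the sample addresses are illustrative stand-ins for struct ether_addr and the peer_eth_addrs[] table.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative 14-byte Ethernet header, equivalent in layout to
 * struct ether_hdr. */
struct eth_hdr {
	uint8_t  dst[6];
	uint8_t  src[6];
	uint16_t ether_type; /* big-endian on the wire */
};

/* Rewrite dst/src the way the MAC forwarding engine does. */
static void mac_rewrite(struct eth_hdr *hdr, const uint8_t peer[6],
			const uint8_t port_mac[6])
{
	memcpy(hdr->dst, peer, 6);     /* dst = peer of the TX port */
	memcpy(hdr->src, port_mac, 6); /* src = MAC of the TX port  */
}

int main(void)
{
	uint8_t frame[64] = {0};
	const uint8_t peer[6]     = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};
	const uint8_t port_mac[6] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x02};
	struct eth_hdr *hdr = (struct eth_hdr *)frame;

	mac_rewrite(hdr, peer, port_mac);
	printf("dst %02x:..:%02x  src %02x:..:%02x\n",
	       hdr->dst[0], hdr->dst[5], hdr->src[0], hdr->src[5]);
	return 0;
}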
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +static void +usage(char* progname) +{ + printf("usage: %s [--interactive|-i] [--help|-h] | [" + "--coremask=COREMASK --portmask=PORTMASK --numa " + "--eth-peers-configfile= | " + "--eth-peer=X,M:M:M:M:M:M | --nb-cores= | --nb-ports= | " + "--pkt-filter-mode= |" + "--rss-ip | --rss-udp | " + "--rxpt= | --rxht= | --rxwt= | --rxfreet= | " + "--txpt= | --txht= | --txwt= | --txfreet= | " + "--txrst= ]\n", + progname); + printf(" --interactive: run in interactive mode\n"); + printf(" --help: display this message and quit\n"); + printf(" --eth-peers-configfile=name of file with ethernet addresses " + "of peer ports\n"); + printf(" --eth-peer=X,M:M:M:M:M:M set the mac address of the X peer " + "port (0 <= X < %d)\n", RTE_MAX_ETHPORTS); + printf(" --nb-cores=N set the number of forwarding cores" + " (1 <= N <= %d)\n", nb_lcores); + printf(" --nb-ports=N set the number of forwarding ports" + " (1 <= N <= %d)\n", nb_ports); + printf(" --coremask=COREMASK: hexadecimal bitmask of cores running " + "the packet forwarding test\n"); + printf(" --portmask=PORTMASK: hexadecimal bitmask of ports used " + "by the packet forwarding test\n"); + printf(" --numa: enable NUMA-aware allocation of RX/TX rings and of " + " RX memory buffers (mbufs)\n"); + printf(" --mbuf-size=N set the data size of mbuf to N bytes\n"); + printf(" --max-pkt-len=N set the maximum size of packet to N bytes\n"); + printf(" --pkt-filter-mode=N: set Flow director mode " + "( N: none (default mode) or signature or perfect)\n"); + printf(" --pkt-filter-report-hash=N: set Flow director report mode " + "( N: none or match (default) or always)\n"); + printf(" --pkt-filter-size=N: set Flow director mode " + "( N: 64K (default mode) or 128K or 256K)\n"); + printf(" --pkt-filter-flexbytes-offset=N: set flexbytes-offset." + " The offset is defined in word units counted from the" + " first byte of the destination Ethernet MAC address." + " 0 <= N <= 32\n"); + printf(" --pkt-filter-drop-queue=N: set drop-queue." + " In perfect mode, when you add a rule with queue -1" + " the packet will be enqueued into the rx drop-queue." + " If the drop-queue doesn't exist, the packet is dropped." 
+ " By default drop-queue=127\n"); + printf(" --crc-strip: enable CRC stripping by hardware\n"); + printf(" --enable-rx-cksum: enable rx hardware checksum offload\n"); + printf(" --disable-hw-vlan: disable hardware vlan\n"); + printf(" --disable-rss: disable rss\n"); + printf(" --port-topology=N: set port topology (N: paired (default) or " + "chained)\n"); + printf(" --rss-ip: set RSS functions to IPv4/IPv6 only \n"); + printf(" --rss-udp: set RSS functions to IPv4/IPv6 + UDP\n"); + printf(" --rxq=N set the number of RX queues per port to N\n"); + printf(" --rxd=N set the number of descriptors in RX rings to N\n"); + printf(" --txq=N set the number of TX queues per port to N\n"); + printf(" --txd=N set the number of descriptors in TX rings to N\n"); + printf(" --burst=N set the number of packets per burst to N\n"); + printf(" --mbcache=N set the cache of mbuf memory pool to N\n"); + printf(" --rxpt=N set prefetch threshold register of RX rings to N" + " (0 <= N <= 16)\n"); + printf(" --rxht=N set the host threshold register of RX rings to N" + " (0 <= N <= 16)\n"); + printf(" --rxfreet=N set the free threshold of RX descriptors to N" + " (0 <= N < value of rxd)\n"); + printf(" --rxwt=N set the write-back threshold register of RX rings" + " to N (0 <= N <= 16)\n"); + printf(" --txpt=N set the prefetch threshold register of TX rings" + " to N (0 <= N <= 16)\n"); + printf(" --txht=N set the nhost threshold register of TX rings to N" + " (0 <= N <= 16)\n"); + printf(" --txwt=N set the write-back threshold register of TX rings" + " to N (0 <= N <= 16)\n"); + printf(" --txfreet=N set the transmit free threshold of TX rings to N" + " (0 <= N <= value of txd)\n"); + printf(" --txrst=N set the transmit RS bit threshold of TX rings to N" + " (0 <= N <= value of txd)\n"); +} + +static int +init_peer_eth_addrs(char *config_filename) +{ + FILE *config_file; + portid_t i; + char buf[50]; + + config_file = fopen(config_filename, "r"); + if (config_file == NULL) { + perror("open log file failed\n"); + return -1; + } + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + + if (fgets(buf, sizeof(buf), config_file) == NULL) + break; + + if (cmdline_parse_etheraddr(NULL, buf, &peer_eth_addrs[i]) < 0 ){ + printf("bad format of mac address on line %d\n", i); + fclose(config_file); + return -1; + } + } + fclose(config_file); + nb_peer_eth_addrs = (portid_t) i; + return 0; +} + +/* + * Parse the coremask given as argument (hexadecimal string) and set + * the global configuration of forwarding cores. + */ +static void +parse_fwd_coremask(const char *coremask) +{ + char *end; + unsigned long long int cm; + + /* parse hexadecimal string */ + end = NULL; + cm = strtoull(coremask, &end, 16); + if ((coremask[0] == '\0') || (end == NULL) || (*end != '\0')) + rte_exit(EXIT_FAILURE, "Invalid fwd core mask\n"); + else + set_fwd_lcores_mask((uint64_t) cm); +} + +/* + * Parse the coremask given as argument (hexadecimal string) and set + * the global configuration of forwarding cores. 
+ */ +static void +parse_fwd_portmask(const char *portmask) +{ + char *end; + unsigned long long int pm; + + /* parse hexadecimal string */ + end = NULL; + pm = strtoull(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + rte_exit(EXIT_FAILURE, "Invalid fwd port mask\n"); + else + set_fwd_ports_mask((uint64_t) pm); +} + +void +launch_args_parse(int argc, char** argv) +{ + int n, opt; + char **argvopt; + int opt_idx; + static struct option lgopts[] = { + { "help", 0, 0, 0 }, + { "interactive", 0, 0, 0 }, + { "eth-peers-configfile", 1, 0, 0 }, + { "eth-peer", 1, 0, 0 }, + { "ports", 1, 0, 0 }, + { "nb-cores", 1, 0, 0 }, + { "nb-ports", 1, 0, 0 }, + { "coremask", 1, 0, 0 }, + { "portmask", 1, 0, 0 }, + { "numa", 0, 0, 0 }, + { "mbuf-size", 1, 0, 0 }, + { "max-pkt-len", 1, 0, 0 }, + { "pkt-filter-mode", 1, 0, 0 }, + { "pkt-filter-report-hash", 1, 0, 0 }, + { "pkt-filter-size", 1, 0, 0 }, + { "pkt-filter-flexbytes-offset",1, 0, 0 }, + { "pkt-filter-drop-queue", 1, 0, 0 }, + { "crc-strip", 0, 0, 0 }, + { "disable-hw-vlan", 0, 0, 0 }, + { "disable-rss", 0, 0, 0 }, + { "port-topology", 1, 0, 0 }, + { "rss-ip", 0, 0, 0 }, + { "rss-udp", 0, 0, 0 }, + { "rxq", 1, 0, 0 }, + { "txq", 1, 0, 0 }, + { "rxd", 1, 0, 0 }, + { "txd", 1, 0, 0 }, + { "burst", 1, 0, 0 }, + { "mbcache", 1, 0, 0 }, + { "txpt", 1, 0, 0 }, + { "txht", 1, 0, 0 }, + { "txwt", 1, 0, 0 }, + { "txfreet", 1, 0, 0 }, + { "txrst", 1, 0, 0 }, + { "rxpt", 1, 0, 0 }, + { "rxht", 1, 0, 0 }, + { "rxwt", 1, 0, 0 }, + { "rxfreet", 1, 0, 0 }, + { 0, 0, 0, 0 }, + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "ih", + lgopts, &opt_idx)) != EOF) { + switch (opt) { + case 'i': + printf("Interactive-mode selected\n"); + interactive = 1; + break; + case 0: /*long options */ + if (!strcmp(lgopts[opt_idx].name, "help")) { + usage(argv[0]); + rte_exit(EXIT_SUCCESS, "Displayed help\n"); + } + if (!strcmp(lgopts[opt_idx].name, "interactive")) { + printf("Interactive-mode selected\n"); + interactive = 1; + } + if (!strcmp(lgopts[opt_idx].name, + "eth-peers-configfile")) { + if (init_peer_eth_addrs(optarg) != 0) + rte_exit(EXIT_FAILURE, + "Cannot open logfile\n"); + } + if (!strcmp(lgopts[opt_idx].name, "eth-peer")) { + char *port_end; + uint8_t c, peer_addr[6]; + + errno = 0; + n = strtoul(optarg, &port_end, 10); + if (errno != 0 || port_end == optarg || *port_end++ != ',') + rte_exit(EXIT_FAILURE, + "Invalid eth-peer: %s", optarg); + if (n >= RTE_MAX_ETHPORTS) + rte_exit(EXIT_FAILURE, + "eth-peer: port %d >= RTE_MAX_ETHPORTS(%d)\n", + n, RTE_MAX_ETHPORTS); + + if (cmdline_parse_etheraddr(NULL, port_end, &peer_addr) < 0 ) + rte_exit(EXIT_FAILURE, + "Invalid ethernet address: %s\n", + port_end); + for (c = 0; c < 6; c++) + peer_eth_addrs[n].addr_bytes[c] = + peer_addr[c]; + nb_peer_eth_addrs++; + } + if (!strcmp(lgopts[opt_idx].name, "nb-ports")) { + n = atoi(optarg); + if (n > 0 && n <= nb_ports) + nb_fwd_ports = (uint8_t) n; + else + rte_exit(EXIT_FAILURE, + "nb-ports should be > 0 and <= %d\n", + nb_ports); + } + if (!strcmp(lgopts[opt_idx].name, "nb-cores")) { + n = atoi(optarg); + if (n > 0 && n <= nb_lcores) + nb_fwd_lcores = (uint8_t) n; + else + rte_exit(EXIT_FAILURE, + "nb-cores should be > 0 and <= %d\n", + nb_lcores); + } + if (!strcmp(lgopts[opt_idx].name, "coremask")) + parse_fwd_coremask(optarg); + if (!strcmp(lgopts[opt_idx].name, "portmask")) + parse_fwd_portmask(optarg); + if (!strcmp(lgopts[opt_idx].name, "numa")) + numa_support = 1; + if (!strcmp(lgopts[opt_idx].name, "mbuf-size")) { + n 
= atoi(optarg); + if (n > 0 && n <= 0xFFFF) + mbuf_data_size = (uint16_t) n; + else + rte_exit(EXIT_FAILURE, + "mbuf-size should be > 0 and < 65536\n"); + } + if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) { + n = atoi(optarg); + if (n >= ETHER_MIN_LEN) { + rx_mode.max_rx_pkt_len = (uint32_t) n; + if (n > ETHER_MAX_LEN) + rx_mode.jumbo_frame = 1; + } else + rte_exit(EXIT_FAILURE, + "Invalid max-pkt-len=%d - should be > %d\n", + n, ETHER_MIN_LEN); + } + if (!strcmp(lgopts[opt_idx].name, "pkt-filter-mode")) { + if (!strcmp(optarg, "signature")) + fdir_conf.mode = + RTE_FDIR_MODE_SIGNATURE; + else if (!strcmp(optarg, "perfect")) + fdir_conf.mode = RTE_FDIR_MODE_PERFECT; + else if (!strcmp(optarg, "none")) + fdir_conf.mode = RTE_FDIR_MODE_NONE; + else + rte_exit(EXIT_FAILURE, + "pkt-mode-invalid %s invalid - must be: " + "none, signature or perfect\n", + optarg); + } + if (!strcmp(lgopts[opt_idx].name, + "pkt-filter-report-hash")) { + if (!strcmp(optarg, "none")) + fdir_conf.status = + RTE_FDIR_NO_REPORT_STATUS; + else if (!strcmp(optarg, "match")) + fdir_conf.status = + RTE_FDIR_REPORT_STATUS; + else if (!strcmp(optarg, "always")) + fdir_conf.status = + RTE_FDIR_REPORT_STATUS_ALWAYS; + else + rte_exit(EXIT_FAILURE, + "pkt-filter-report-hash %s invalid " + "- must be: none or match or always\n", + optarg); + } + if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) { + if (!strcmp(optarg, "64K")) + fdir_conf.pballoc = + RTE_FDIR_PBALLOC_64K; + else if (!strcmp(optarg, "128K")) + fdir_conf.pballoc = + RTE_FDIR_PBALLOC_128K; + else if (!strcmp(optarg, "256K")) + fdir_conf.pballoc = + RTE_FDIR_PBALLOC_256K; + else + rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -" + " must be: 64K or 128K or 256K\n", + optarg); + } + if (!strcmp(lgopts[opt_idx].name, + "pkt-filter-flexbytes-offset")) { + n = atoi(optarg); + if ( n >= 0 && n <= (int) 32) + fdir_conf.flexbytes_offset = + (uint8_t) n; + else + rte_exit(EXIT_FAILURE, + "flexbytes %d invalid - must" + "be >= 0 && <= 32\n", n); + } + if (!strcmp(lgopts[opt_idx].name, + "pkt-filter-drop-queue")) { + n = atoi(optarg); + if (n >= 0) + fdir_conf.drop_queue = (uint8_t) n; + else + rte_exit(EXIT_FAILURE, + "drop queue %d invalid - must" + "be >= 0 \n", n); + } + if (!strcmp(lgopts[opt_idx].name, "crc-strip")) + rx_mode.hw_strip_crc = 1; + if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum")) + rx_mode.hw_ip_checksum = 1; + if (!strcmp(lgopts[opt_idx].name, "disable-hw-vlan")) + rx_mode.hw_vlan_filter = 0; + if (!strcmp(lgopts[opt_idx].name, "disable-rss")) + rss_hf = 0; + if (!strcmp(lgopts[opt_idx].name, "port-topology")) { + if (!strcmp(optarg, "paired")) + port_topology = PORT_TOPOLOGY_PAIRED; + else if (!strcmp(optarg, "chained")) + port_topology = PORT_TOPOLOGY_CHAINED; + else + rte_exit(EXIT_FAILURE, "port-topology %s invalid -" + " must be: paired or chained \n", + optarg); + } + if (!strcmp(lgopts[opt_idx].name, "rss-ip")) + rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; + if (!strcmp(lgopts[opt_idx].name, "rss-udp")) + rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6 | + ETH_RSS_IPV4_UDP; + if (!strcmp(lgopts[opt_idx].name, "rxq")) { + n = atoi(optarg); + if (n >= 1 && n <= (int) MAX_QUEUE_ID) + nb_rxq = (queueid_t) n; + else + rte_exit(EXIT_FAILURE, "rxq %d invalid - must be" + " >= 1 && <= %d\n", n, + (int) MAX_QUEUE_ID); + } + if (!strcmp(lgopts[opt_idx].name, "txq")) { + n = atoi(optarg); + if (n >= 1 && n <= (int) MAX_QUEUE_ID) + nb_txq = (queueid_t) n; + else + rte_exit(EXIT_FAILURE, "txq %d invalid - must be" + " >= 1 && <= %d\n", n, + (int) 
MAX_QUEUE_ID); + } + if (!strcmp(lgopts[opt_idx].name, "rxd")) { + n = atoi(optarg); + if (n > 0) + nb_rxd = (uint16_t) n; + else + rte_exit(EXIT_FAILURE, "rxd must be > 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txd")) { + n = atoi(optarg); + if (n > 0) + nb_txd = (uint16_t) n; + else + rte_exit(EXIT_FAILURE, "txd must be in > 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "burst")) { + n = atoi(optarg); + if ((n >= 1) && (n <= MAX_PKT_BURST)) + nb_pkt_per_burst = (uint16_t) n; + else + rte_exit(EXIT_FAILURE, + "burst must >= 1 and <= %d]", + MAX_PKT_BURST); + } + if (!strcmp(lgopts[opt_idx].name, "mbcache")) { + n = atoi(optarg); + if ((n >= 0) && + (n <= RTE_MEMPOOL_CACHE_MAX_SIZE)) + mb_mempool_cache = (uint16_t) n; + else + rte_exit(EXIT_FAILURE, + "mbcache must be >= 0 and <= %d\n", + RTE_MEMPOOL_CACHE_MAX_SIZE); + } + if (!strcmp(lgopts[opt_idx].name, "txpt")) { + n = atoi(optarg); + if (n >= 0) + tx_thresh.pthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "txpt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txht")) { + n = atoi(optarg); + if (n >= 0) + tx_thresh.hthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "txht must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txwt")) { + n = atoi(optarg); + if (n >= 0) + tx_thresh.wthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "txwt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txfreet")) { + n = atoi(optarg); + if (n >= 0) + tx_free_thresh = (uint16_t)n; + else + rte_exit(EXIT_FAILURE, "txfreet must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txrst")) { + n = atoi(optarg); + if (n >= 0) + tx_rs_thresh = (uint16_t)n; + else + rte_exit(EXIT_FAILURE, "txrst must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxpt")) { + n = atoi(optarg); + if (n >= 0) + rx_thresh.pthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "rxpt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxht")) { + n = atoi(optarg); + if (n >= 0) + rx_thresh.hthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "rxht must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxwt")) { + n = atoi(optarg); + if (n >= 0) + rx_thresh.wthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "rxwt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxd")) { + n = atoi(optarg); + if (n > 0) { + if (rx_free_thresh >= n) + rte_exit(EXIT_FAILURE, + "rxd must be > " + "rx_free_thresh(%d)\n", + (int)rx_free_thresh); + else + nb_rxd = (uint16_t) n; + } else + rte_exit(EXIT_FAILURE, + "rxd(%d) invalid - must be > 0\n", + n); + } + if (!strcmp(lgopts[opt_idx].name, "txd")) { + n = atoi(optarg); + if (n > 0) + nb_txd = (uint16_t) n; + else + rte_exit(EXIT_FAILURE, "txd must be in > 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txpt")) { + n = atoi(optarg); + if (n >= 0) + tx_thresh.pthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "txpt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txht")) { + n = atoi(optarg); + if (n >= 0) + tx_thresh.hthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "txht must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "txwt")) { + n = atoi(optarg); + if (n >= 0) + tx_thresh.wthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "txwt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxpt")) { + n = atoi(optarg); + if (n >= 0) + rx_thresh.pthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "rxpt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxht")) { + n = atoi(optarg); + if (n >= 0) + rx_thresh.hthresh = (uint8_t)n; + else + 
rte_exit(EXIT_FAILURE, "rxht must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxwt")) { + n = atoi(optarg); + if (n >= 0) + rx_thresh.wthresh = (uint8_t)n; + else + rte_exit(EXIT_FAILURE, "rxwt must be >= 0\n"); + } + if (!strcmp(lgopts[opt_idx].name, "rxfreet")) { + n = atoi(optarg); + if (n >= 0) + rx_free_thresh = (uint16_t)n; + else + rte_exit(EXIT_FAILURE, "rxfreet must be >= 0\n"); + } + break; + case 'h': + usage(argv[0]); + rte_exit(EXIT_SUCCESS, "Displayed help\n"); + break; + default: + usage(argv[0]); + rte_exit(EXIT_FAILURE, + "Command line is incomplete or incorrect\n"); + break; + } + } +} diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c new file mode 100644 index 0000000000..d1b128964d --- /dev/null +++ b/app/test-pmd/rxonly.c @@ -0,0 +1,194 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +#define MAX_PKT_RX_FLAGS 11 +static const char *pkt_rx_flag_names[MAX_PKT_RX_FLAGS] = { + "VLAN_PKT", + "RSS_HASH", + "PKT_RX_FDIR", + "IP_CKSUM", + "IP_CKSUM_BAD", + + "IPV4_HDR", + "IPV4_HDR_EXT", + "IPV6_HDR", + "IPV6_HDR_EXT", + + "IEEE1588_PTP", + "IEEE1588_TMST", +}; + +static inline void +print_ether_addr(const char *what, struct ether_addr *eth_addr) +{ + printf("%s%02X:%02X:%02X:%02X:%02X:%02X", + what, + eth_addr->addr_bytes[0], + eth_addr->addr_bytes[1], + eth_addr->addr_bytes[2], + eth_addr->addr_bytes[3], + eth_addr->addr_bytes[4], + eth_addr->addr_bytes[5]); +} + +/* + * Received a burst of packets. 
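The pkt_rx_flag_names[] table above maps each RX offload flag bit position to a printable name, and the receive routine that follows prints one name per set bit in ol_flags. A tiny stand-alone illustration of that table-driven decode is shown below; the flags value and the shortened table are made up for the example, not read from a real mbuf.

#include <stdio.h>
#include <stdint.h>

#define NB_FLAGS 5

static const char *flag_names[NB_FLAGS] = {
	"VLAN_PKT", "RSS_HASH", "PKT_RX_FDIR", "IP_CKSUM", "IP_CKSUM_BAD",
};

int main(void)
{
	uint16_t ol_flags = 0x0003; /* example value: bits 0 and 1 set */
	int bit;

	for (bit = 0; bit < NB_FLAGS; bit++) {
		if (ol_flags & (1 << bit))
			printf("  PKT_RX_%s\n", flag_names[bit]);
	}
	return 0;
}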
+ */ +static void +pkt_burst_receive(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mbuf *mb; + struct ether_hdr *eth_hdr; + uint16_t eth_type; + uint16_t ol_flags; + uint16_t nb_rx; + uint16_t i; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + /* + * Receive a burst of packets. + */ + nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst, + nb_pkt_per_burst); + if (unlikely(nb_rx == 0)) + return; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->rx_burst_stats.pkt_burst_spread[nb_rx]++; +#endif + fs->rx_packets += nb_rx; + + /* + * Dump each received packet if verbose_level > 0. + */ + if (verbose_level > 0) + printf("port %u/queue %u: received %u packets\n", + (unsigned) fs->rx_port, + (unsigned) fs->rx_queue, + (unsigned) nb_rx); + for (i = 0; i < nb_rx; i++) { + mb = pkts_burst[i]; + if (verbose_level == 0) { + rte_pktmbuf_free(mb); + continue; + } + eth_hdr = (struct ether_hdr *) mb->pkt.data; + eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type); + ol_flags = mb->ol_flags; + print_ether_addr(" src=", ð_hdr->s_addr); + print_ether_addr(" - dst=", ð_hdr->d_addr); + printf(" - type=0x%04x - length=%u - nb_segs=%d", + eth_type, (unsigned) mb->pkt.pkt_len, + (int)mb->pkt.nb_segs); + if (ol_flags & PKT_RX_RSS_HASH) + printf(" - RSS hash=0x%x", (unsigned) mb->pkt.hash.rss); + else if (ol_flags & PKT_RX_FDIR) + printf(" - FDIR hash=0x%x - FDIR id=0x%x ", + mb->pkt.hash.fdir.hash, mb->pkt.hash.fdir.id); + if (ol_flags & PKT_RX_VLAN_PKT) + printf(" - VLAN tci=0x%x", mb->pkt.vlan_tci); + printf("\n"); + if (ol_flags != 0) { + int rxf; + + for (rxf = 0; rxf < MAX_PKT_RX_FLAGS; rxf++) { + if (ol_flags & (1 << rxf)) + printf(" PKT_RX_%s\n", + pkt_rx_flag_names[rxf]); + } + } + rte_pktmbuf_free(mb); + } + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +struct fwd_engine rx_only_engine = { + .fwd_mode_name = "rxonly", + .port_fwd_begin = NULL, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_receive, +}; diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c new file mode 100644 index 0000000000..6813b664bc --- /dev/null +++ b/app/test-pmd/testpmd.c @@ -0,0 +1,1105 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
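Every forwarding engine, pkt_burst_receive() included, can be built with RTE_TEST_PMD_RECORD_CORE_CYCLES to bracket each burst with rte_rdtsc() and accumulate the elapsed cycles in fs->core_cycles; the totals are later divided by the packet count to report cycles per packet. The sketch below reproduces that bracketing with a generic counter; read_tsc() is only a placeholder for rte_rdtsc(), so the printed figure is illustrative rather than a cycle count.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Placeholder for rte_rdtsc(): any monotonically increasing counter. */
static uint64_t read_tsc(void)
{
	return (uint64_t)clock();
}

struct stream_stats {
	uint64_t rx_packets;
	uint64_t core_cycles;
};

/* Bracket one "burst" the way the engines do under
 * RTE_TEST_PMD_RECORD_CORE_CYCLES. */
static void one_burst(struct stream_stats *st, unsigned nb_rx)
{
	uint64_t start = read_tsc();

	st->rx_packets += nb_rx; /* stand-in for the forwarding work */
	st->core_cycles += read_tsc() - start;
}

int main(void)
{
	struct stream_stats st = {0, 0};
	int i;

	for (i = 0; i < 1000; i++)
		one_burst(&st, 32);
	if (st.rx_packets != 0)
		printf("counter ticks/packet = %llu\n",
		       (unsigned long long)(st.core_cycles / st.rx_packets));
	return 0;
}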
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +uint16_t verbose_level = 0; /**< Silent by default. */ + +/* use master core for command line ? */ +uint8_t interactive = 0; + +/* + * NUMA support configuration. + * When set, the NUMA support attempts to dispatch the allocation of the + * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the + * probed ports among the CPU sockets 0 and 1. + * Otherwise, all memory is allocated from CPU socket 0. + */ +uint8_t numa_support = 0; /**< No numa support by default */ + +/* + * Record the Ethernet address of peer target ports to which packets are + * forwarded. + * Must be instanciated with the ethernet addresses of peer traffic generator + * ports. + */ +struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; +portid_t nb_peer_eth_addrs = 0; + +/* + * Probed Target Environment. + */ +struct rte_port *ports; /**< For all probed ethernet ports. */ +portid_t nb_ports; /**< Number of probed ethernet ports. */ +struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ +lcoreid_t nb_lcores; /**< Number of probed logical cores. */ + +/* + * Test Forwarding Configuration. + * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores + * nb_fwd_ports <= nb_cfg_ports <= nb_ports + */ +lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ +lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ +portid_t nb_cfg_ports; /**< Number of configured ports. */ +portid_t nb_fwd_ports; /**< Number of forwarding ports. */ + +unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */ +portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */ + +struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */ +streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */ + +/* + * Forwarding engines. + */ +struct fwd_engine * fwd_engines[] = { + &io_fwd_engine, + &mac_fwd_engine, + &rx_only_engine, + &tx_only_engine, + &csum_fwd_engine, +#ifdef RTE_LIBRTE_IEEE1588 + &ieee1588_fwd_engine, +#endif + NULL, +}; + +struct fwd_config cur_fwd_config; +struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ + +uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */ + +/* + * Configuration of packet segments used by the "txonly" processing engine. + */ +uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. 
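fwd_engines[] is a NULL-terminated table, so selecting the engine behind a forwarding mode name reduces to a linear scan on fwd_mode_name. The lookup below is a hedged sketch of that scan using a simplified engine type; the actual selection logic lives elsewhere in the test application and is not shown in this hunk.

#include <stdio.h>
#include <string.h>

struct fwd_engine { const char *fwd_mode_name; };

static struct fwd_engine io_eng  = { "io" };
static struct fwd_engine mac_eng = { "mac" };
static struct fwd_engine *engines[] = { &io_eng, &mac_eng, NULL };

/* Scan the NULL-terminated table for a matching mode name. */
static struct fwd_engine *find_engine(const char *name)
{
	unsigned i;

	for (i = 0; engines[i] != NULL; i++) {
		if (strcmp(engines[i]->fwd_mode_name, name) == 0)
			return engines[i];
	}
	return NULL;
}

int main(void)
{
	struct fwd_engine *e = find_engine("mac");

	printf("%s\n", e != NULL ? e->fwd_mode_name : "not found");
	return 0;
}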
*/ +uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = { + TXONLY_DEF_PACKET_LEN, +}; +uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ + +uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ +uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */ + +/* + * Ethernet Ports Configuration. + */ +int promiscuous_on = 1; /**< Ports set in promiscuous mode by default. */ + +/* + * Configurable number of RX/TX queues. + */ +queueid_t nb_rxq = 1; /**< Number of RX queues per port. */ +queueid_t nb_txq = 1; /**< Number of TX queues per port. */ + +/* + * Configurable number of RX/TX ring descriptors. + */ +#define RTE_TEST_RX_DESC_DEFAULT 128 +#define RTE_TEST_TX_DESC_DEFAULT 512 +uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ +uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ + +/* + * Configurable values of RX and TX ring threshold registers. + */ +#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */ +#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */ +#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */ + +#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */ +#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */ +#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */ + +struct rte_eth_thresh rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, +}; + +struct rte_eth_thresh tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, +}; + +/* + * Configurable value of RX free threshold. + */ +uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */ + +/* + * Configurable value of TX free threshold. + */ +uint16_t tx_free_thresh = 0; /* Use default values. */ + +/* + * Configurable value of TX RS bit threshold. + */ +uint16_t tx_rs_thresh = 0; /* Use default values. */ + +/* + * Receive Side Scaling (RSS) configuration. + */ +uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */ + +/* + * Port topology configuration + */ +uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */ + +/* + * Ethernet device configuration. + */ +struct rte_eth_rxmode rx_mode = { + .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled. */ + .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ + .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ + .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */ +}; + +struct rte_fdir_conf fdir_conf = { + .mode = RTE_FDIR_MODE_NONE, + .pballoc = RTE_FDIR_PBALLOC_64K, + .status = RTE_FDIR_REPORT_STATUS, + .flexbytes_offset = 0x6, + .drop_queue = 127, +}; + +static volatile int test_done = 1; /* stop packet forwarding when set to 1. */ + +/* + * Setup default configuration. + */ +static void +set_default_fwd_lcores_config(void) +{ + unsigned int i; + unsigned int nb_lc; + + nb_lc = 0; + for (i = 0; i < RTE_MAX_LCORE; i++) { + if (! 
rte_lcore_is_enabled(i)) + continue; + if (i == rte_get_master_lcore()) + continue; + fwd_lcores_cpuids[nb_lc++] = i; + } + nb_lcores = (lcoreid_t) nb_lc; + nb_cfg_lcores = nb_lcores; + nb_fwd_lcores = 1; +} + +static void +set_def_peer_eth_addrs(void) +{ + portid_t i; + + for (i = 0; i < RTE_MAX_ETHPORTS; i++) { + peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR; + peer_eth_addrs[i].addr_bytes[5] = i; + } +} + +static void +set_default_fwd_ports_config(void) +{ + portid_t pt_id; + + for (pt_id = 0; pt_id < nb_ports; pt_id++) + fwd_ports_ids[pt_id] = pt_id; + + nb_cfg_ports = nb_ports; + nb_fwd_ports = nb_ports; +} + +void +set_def_fwd_config(void) +{ + set_default_fwd_lcores_config(); + set_def_peer_eth_addrs(); + set_default_fwd_ports_config(); +} + +/* + * Configuration initialisation done once at init time. + */ +struct mbuf_ctor_arg { + uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */ + uint16_t seg_buf_size; /**< size of data segment in mbuf. */ +}; + +struct mbuf_pool_ctor_arg { + uint16_t seg_buf_size; /**< size of data segment in mbuf. */ +}; + +static void +testpmd_mbuf_ctor(struct rte_mempool *mp, + void *opaque_arg, + void *raw_mbuf, + __attribute__((unused)) unsigned i) +{ + struct mbuf_ctor_arg *mb_ctor_arg; + struct rte_mbuf *mb; + + mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg; + mb = (struct rte_mbuf *) raw_mbuf; + + mb->pool = mp; + mb->buf_addr = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset); + mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) + + mb_ctor_arg->seg_buf_offset); + mb->buf_len = mb_ctor_arg->seg_buf_size; + mb->type = RTE_MBUF_PKT; + mb->ol_flags = 0; + mb->pkt.data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM; + mb->pkt.nb_segs = 1; + mb->pkt.l2_len = 0; + mb->pkt.l3_len = 0; + mb->pkt.vlan_tci = 0; + mb->pkt.hash.rss = 0; +} + +static void +testpmd_mbuf_pool_ctor(struct rte_mempool *mp, + void *opaque_arg) +{ + struct mbuf_pool_ctor_arg *mbp_ctor_arg; + struct rte_pktmbuf_pool_private *mbp_priv; + + if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { + printf("%s(%s) private_data_size %d < %d\n", + __func__, mp->name, (int) mp->private_data_size, + (int) sizeof(struct rte_pktmbuf_pool_private)); + return; + } + mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg; + mbp_priv = (struct rte_pktmbuf_pool_private *) + ((char *)mp + sizeof(struct rte_mempool)); + mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size; +} + +static void +mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, + unsigned int socket_id) +{ + char pool_name[RTE_MEMPOOL_NAMESIZE]; + struct rte_mempool *rte_mp; + struct mbuf_pool_ctor_arg mbp_ctor_arg; + struct mbuf_ctor_arg mb_ctor_arg; + uint32_t mb_size; + + mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM + + mbuf_seg_size); + mb_ctor_arg.seg_buf_offset = + (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf)); + mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size; + mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size; + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); + rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size, + (unsigned) mb_mempool_cache, + sizeof(struct rte_pktmbuf_pool_private), + testpmd_mbuf_pool_ctor, &mbp_ctor_arg, + testpmd_mbuf_ctor, &mb_ctor_arg, + socket_id, 0); + if (rte_mp == NULL) { + rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u failed\n", + socket_id); + } +} + +static void +init_config(void) +{ + struct rte_port *port; + struct rte_mempool *mbp; + 
unsigned int nb_mbuf_per_pool; + streamid_t sm_id; + lcoreid_t lc_id; + portid_t pt_id; + + /* Configuration of logical cores. */ + fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", + sizeof(struct fwd_lcore *) * nb_lcores, + CACHE_LINE_SIZE); + if (fwd_lcores == NULL) { + rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) failed\n", + nb_lcores); + } + for (lc_id = 0; lc_id < nb_lcores; lc_id++) { + fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore", + sizeof(struct fwd_lcore), + CACHE_LINE_SIZE); + if (fwd_lcores[lc_id] == NULL) { + rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) failed\n"); + } + fwd_lcores[lc_id]->cpuid_idx = lc_id; + } + + /* + * Create pools of mbuf. + * If NUMA support is disabled, create a single pool of mbuf in + * socket 0 memory. + * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1. + */ + nb_mbuf_per_pool = nb_rxd + (nb_lcores * mb_mempool_cache) + + nb_txd + MAX_PKT_BURST; + if (numa_support) { + nb_mbuf_per_pool = nb_mbuf_per_pool * (nb_ports >> 1); + mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); + mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1); + } else { + nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports); + mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); + } + + /* + * Records which Mbuf pool to use by each logical core, if needed. + */ + for (lc_id = 0; lc_id < nb_lcores; lc_id++) { + mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id)); + if (mbp == NULL) + mbp = mbuf_pool_find(0); + fwd_lcores[lc_id]->mbp = mbp; + } + + /* Configuration of Ethernet ports. */ + ports = rte_zmalloc("testpmd: ports", + sizeof(struct rte_port) * nb_ports, + CACHE_LINE_SIZE); + if (ports == NULL) { + rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) failed\n", + nb_ports); + } + port = ports; + for (pt_id = 0; pt_id < nb_ports; pt_id++, port++) { + rte_eth_dev_info_get(pt_id, &port->dev_info); + if (nb_rxq > port->dev_info.max_rx_queues) { + rte_exit(EXIT_FAILURE, "Port %d: max RX queues %d < nb_rxq %d\n", + (int) pt_id, + (int) port->dev_info.max_rx_queues, + (int) nb_rxq); + } + if (nb_txq > port->dev_info.max_tx_queues) { + rte_exit(EXIT_FAILURE, "Port %d: max TX queues %d < nb_txq %d\n", + (int) pt_id, + (int) port->dev_info.max_tx_queues, + (int) nb_txq); + } + + if (numa_support) + port->socket_id = (pt_id < (nb_ports >> 1)) ? 0 : 1; + else + port->socket_id = 0; + } + + /* Configuration of packet forwarding streams. */ + nb_fwd_streams = (streamid_t) (nb_ports * nb_rxq); + fwd_streams = rte_zmalloc("testpmd: fwd_streams", + sizeof(struct fwd_stream *) * nb_fwd_streams, + CACHE_LINE_SIZE); + if (fwd_streams == NULL) { + rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) failed\n", + nb_fwd_streams); + } + for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { + fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", + sizeof(struct fwd_stream), + CACHE_LINE_SIZE); + if (fwd_streams[sm_id] == NULL) { + rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream) failed\n"); + } + } +} + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS +static void +pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) +{ + unsigned int total_burst; + unsigned int nb_burst; + unsigned int burst_stats[3]; + uint16_t pktnb_stats[3]; + uint16_t nb_pkt; + int burst_percent[3]; + + /* + * First compute the total number of packet bursts and the + * two highest numbers of bursts of the same number of packets. 
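init_config() above sizes each mbuf pool as nb_rxd + nb_lcores * mb_mempool_cache + nb_txd + MAX_PKT_BURST per port, then multiplies by the number of ports the pool serves (all ports without NUMA support, half of them per socket with it). The arithmetic below simply evaluates that formula with the descriptor defaults visible in testpmd.c (128 RX, 512 TX) and assumed example values for the remaining terms, so the resulting numbers are illustrative only.

#include <stdio.h>

int main(void)
{
	/* Defaults from testpmd.c above. */
	unsigned nb_rxd = 128;
	unsigned nb_txd = 512;
	/* Example values, assumed for illustration only. */
	unsigned nb_lcores = 4;
	unsigned mb_mempool_cache = 16;
	unsigned max_pkt_burst = 32;
	unsigned nb_ports = 2;

	unsigned per_port = nb_rxd + nb_lcores * mb_mempool_cache
			    + nb_txd + max_pkt_burst;
	unsigned pool_no_numa = per_port * nb_ports;        /* one pool    */
	unsigned pool_numa    = per_port * (nb_ports >> 1); /* per socket  */

	printf("mbufs/port=%u  single pool=%u  per-socket pool=%u\n",
	       per_port, pool_no_numa, pool_numa);
	return 0;
}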
+ */ + total_burst = 0; + burst_stats[0] = burst_stats[1] = burst_stats[2] = 0; + pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0; + for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) { + nb_burst = pbs->pkt_burst_spread[nb_pkt]; + if (nb_burst == 0) + continue; + total_burst += nb_burst; + if (nb_burst > burst_stats[0]) { + burst_stats[1] = burst_stats[0]; + pktnb_stats[1] = pktnb_stats[0]; + burst_stats[0] = nb_burst; + pktnb_stats[0] = nb_pkt; + } + } + if (total_burst == 0) + return; + burst_percent[0] = (burst_stats[0] * 100) / total_burst; + printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst, + burst_percent[0], (int) pktnb_stats[0]); + if (burst_stats[0] == total_burst) { + printf("]\n"); + return; + } + if (burst_stats[0] + burst_stats[1] == total_burst) { + printf(" + %d%% of %d pkts]\n", + 100 - burst_percent[0], pktnb_stats[1]); + return; + } + burst_percent[1] = (burst_stats[1] * 100) / total_burst; + burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]); + if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) { + printf(" + %d%% of others]\n", 100 - burst_percent[0]); + return; + } + printf(" + %d%% of %d pkts + %d%% of others]\n", + burst_percent[1], (int) pktnb_stats[1], burst_percent[2]); +} +#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ + +static void +fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) +{ + struct rte_port *port; + + static const char *fwd_stats_border = "----------------------"; + + port = &ports[port_id]; + printf("\n %s Forward statistics for port %-2d %s\n", + fwd_stats_border, port_id, fwd_stats_border); + printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " + "%-"PRIu64"\n", + stats->ipackets, stats->ierrors, + (uint64_t) (stats->ipackets + stats->ierrors)); + + if (cur_fwd_eng == &csum_fwd_engine) + printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", + port->rx_bad_ip_csum, port->rx_bad_l4_csum); + + printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " + "%-"PRIu64"\n", + stats->opackets, port->tx_dropped, + (uint64_t) (stats->opackets + port->tx_dropped)); + + if (stats->rx_nombuf > 0) + printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + if (port->rx_stream) + pkt_burst_stats_display("RX", &port->rx_stream->rx_burst_stats); + if (port->tx_stream) + pkt_burst_stats_display("TX", &port->tx_stream->tx_burst_stats); +#endif + /* stats fdir */ + if (fdir_conf.mode != RTE_FDIR_MODE_NONE) + printf(" Fdirmiss: %-14"PRIu64" Fdirmatch: %-14"PRIu64"\n", + stats->fdirmiss, + stats->fdirmatch); + + printf(" %s--------------------------------%s\n", + fwd_stats_border, fwd_stats_border); +} + +static void +fwd_stream_stats_display(streamid_t stream_id) +{ + struct fwd_stream *fs; + static const char *fwd_top_stats_border = "-------"; + + fs = fwd_streams[stream_id]; + if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && + (fs->fwd_dropped == 0)) + return; + printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " + "TX Port=%2d/Queue=%2d %s\n", + fwd_top_stats_border, fs->rx_port, fs->rx_queue, + fs->tx_port, fs->tx_queue, fwd_top_stats_border); + printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", + fs->rx_packets, fs->tx_packets, fs->fwd_dropped); + + /* if checksum mode */ + if (cur_fwd_eng == &csum_fwd_engine) { + printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: %-14u\n", + fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); + } + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + pkt_burst_stats_display("RX", 
&fs->rx_burst_stats); + pkt_burst_stats_display("TX", &fs->tx_burst_stats); +#endif +} + +static void +flush_all_rx_queues(void) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + portid_t rxp; + queueid_t rxq; + uint16_t nb_rx; + uint16_t i; + uint8_t j; + + for (j = 0; j < 2; j++) { + for (rxp = 0; rxp < nb_ports; rxp++) { + for (rxq = 0; rxq < nb_rxq; rxq++) { + do { + nb_rx = rte_eth_rx_burst(rxp, rxq, + pkts_burst, + MAX_PKT_BURST); + for (i = 0; i < nb_rx; i++) + rte_pktmbuf_free(pkts_burst[i]); + } while (nb_rx > 0); + } + } + rte_delay_ms(10); /* wait 10 milli-seconds before retrying */ + } +} + +static void +run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) +{ + struct fwd_stream **fsm; + streamid_t nb_fs; + streamid_t sm_id; + + fsm = &fwd_streams[fc->stream_idx]; + nb_fs = fc->stream_nb; + do { + for (sm_id = 0; sm_id < nb_fs; sm_id++) + (*pkt_fwd)(fsm[sm_id]); + } while (! fc->stopped); +} + +static int +start_pkt_forward_on_core(void *fwd_arg) +{ + run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg, + cur_fwd_config.fwd_eng->packet_fwd); + return 0; +} + +/* + * Run the TXONLY packet forwarding engine to send a single burst of packets. + * Used to start communication flows in network loopback test configurations. + */ +static int +run_one_txonly_burst_on_core(void *fwd_arg) +{ + struct fwd_lcore *fwd_lc; + struct fwd_lcore tmp_lcore; + + fwd_lc = (struct fwd_lcore *) fwd_arg; + tmp_lcore = *fwd_lc; + tmp_lcore.stopped = 1; + run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd); + return 0; +} + +/* + * Launch packet forwarding: + * - Setup per-port forwarding context. + * - launch logical cores with their forwarding configuration. + */ +static void +launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) +{ + port_fwd_begin_t port_fwd_begin; + unsigned int i; + unsigned int lc_id; + int diag; + + port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; + if (port_fwd_begin != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) + (*port_fwd_begin)(fwd_ports_ids[i]); + } + for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { + lc_id = fwd_lcores_cpuids[i]; + if ((interactive == 0) || (lc_id != rte_lcore_id())) { + fwd_lcores[i]->stopped = 0; + diag = rte_eal_remote_launch(pkt_fwd_on_lcore, + fwd_lcores[i], lc_id); + if (diag != 0) + printf("launch lcore %u failed - diag=%d\n", + lc_id, diag); + } + } +} + +/* + * Launch packet forwarding configuration. 
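launch_packet_forwarding() hands each forwarding lcore its own fwd_lcore context through rte_eal_remote_launch(), and the stop path later raises the stopped flag and joins the workers with rte_eal_mp_wait_lcore(). The fragment below sketches that launch/stop/join sequence in the smallest possible form; it assumes an EAL that initialises successfully, raises the stop flag immediately instead of letting traffic run, and omits error handling.

#include <stdio.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>

static volatile int stopped;

/* Worker run on each slave lcore; a real worker would forward bursts
 * in this loop, as run_pkt_fwd_on_lcore() does. */
static int worker(void *arg)
{
	unsigned long iters = 0;

	(void)arg;
	while (!stopped)
		iters++;
	printf("lcore %u did %lu iterations\n", rte_lcore_id(), iters);
	return 0;
}

int main(int argc, char **argv)
{
	unsigned lcore_id;

	if (rte_eal_init(argc, argv) < 0)
		return 1;
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(worker, NULL, lcore_id);
	/* ... traffic would run here before asking the workers to stop ... */
	stopped = 1;
	rte_eal_mp_wait_lcore();
	return 0;
}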
+ */ +void +start_packet_forwarding(int with_tx_first) +{ + port_fwd_begin_t port_fwd_begin; + port_fwd_end_t port_fwd_end; + struct rte_port *port; + unsigned int i; + portid_t pt_id; + streamid_t sm_id; + + if (test_done == 0) { + printf("Packet forwarding already started\n"); + return; + } + test_done = 0; + flush_all_rx_queues(); + fwd_config_setup(); + rxtx_config_display(); + + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + pt_id = fwd_ports_ids[i]; + port = &ports[pt_id]; + rte_eth_stats_get(pt_id, &port->stats); + port->tx_dropped = 0; + } + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { + fwd_streams[sm_id]->rx_packets = 0; + fwd_streams[sm_id]->tx_packets = 0; + fwd_streams[sm_id]->fwd_dropped = 0; + fwd_streams[sm_id]->rx_bad_ip_csum = 0; + fwd_streams[sm_id]->rx_bad_l4_csum = 0; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + memset(&fwd_streams[sm_id]->rx_burst_stats, 0, + sizeof(fwd_streams[sm_id]->rx_burst_stats)); + memset(&fwd_streams[sm_id]->tx_burst_stats, 0, + sizeof(fwd_streams[sm_id]->tx_burst_stats)); +#endif +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + fwd_streams[sm_id]->core_cycles = 0; +#endif + } + if (with_tx_first) { + port_fwd_begin = tx_only_engine.port_fwd_begin; + if (port_fwd_begin != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) + (*port_fwd_begin)(fwd_ports_ids[i]); + } + launch_packet_forwarding(run_one_txonly_burst_on_core); + rte_eal_mp_wait_lcore(); + port_fwd_end = tx_only_engine.port_fwd_end; + if (port_fwd_end != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) + (*port_fwd_end)(fwd_ports_ids[i]); + } + } + launch_packet_forwarding(start_pkt_forward_on_core); +} + +void +stop_packet_forwarding(void) +{ + struct rte_eth_stats stats; + struct rte_port *port; + port_fwd_end_t port_fwd_end; + int i; + portid_t pt_id; + streamid_t sm_id; + lcoreid_t lc_id; + uint64_t total_recv; + uint64_t total_xmit; + uint64_t total_rx_dropped; + uint64_t total_tx_dropped; + uint64_t total_rx_nombuf; + uint64_t tx_dropped; + uint64_t rx_bad_ip_csum; + uint64_t rx_bad_l4_csum; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t fwd_cycles; +#endif + static const char *acc_stats_border = "+++++++++++++++"; + + if (test_done) { + printf("Packet forwarding not started\n"); + return; + } + printf("Telling cores to stop..."); + for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) + fwd_lcores[lc_id]->stopped = 1; + printf("\nWaiting for lcores to finish...\n"); + rte_eal_mp_wait_lcore(); + port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end; + if (port_fwd_end != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + pt_id = fwd_ports_ids[i]; + (*port_fwd_end)(pt_id); + } + } +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + fwd_cycles = 0; +#endif + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { + if (cur_fwd_config.nb_fwd_streams > + cur_fwd_config.nb_fwd_ports) { + fwd_stream_stats_display(sm_id); + ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; + ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; + } else { + ports[fwd_streams[sm_id]->tx_port].tx_stream = + fwd_streams[sm_id]; + ports[fwd_streams[sm_id]->rx_port].rx_stream = + fwd_streams[sm_id]; + } + tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; + tx_dropped = (uint64_t) (tx_dropped + + fwd_streams[sm_id]->fwd_dropped); + ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; + + rx_bad_ip_csum = ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; + rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + + 
fwd_streams[sm_id]->rx_bad_ip_csum); + ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = rx_bad_ip_csum; + + rx_bad_l4_csum = ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; + rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + + fwd_streams[sm_id]->rx_bad_l4_csum); + ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = rx_bad_l4_csum; + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + fwd_cycles = (uint64_t) (fwd_cycles + + fwd_streams[sm_id]->core_cycles); +#endif + } + total_recv = 0; + total_xmit = 0; + total_rx_dropped = 0; + total_tx_dropped = 0; + total_rx_nombuf = 0; + for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) { + pt_id = fwd_ports_ids[i]; + + port = &ports[pt_id]; + rte_eth_stats_get(pt_id, &stats); + stats.ipackets -= port->stats.ipackets; + port->stats.ipackets = 0; + stats.opackets -= port->stats.opackets; + port->stats.opackets = 0; + stats.ibytes -= port->stats.ibytes; + port->stats.ibytes = 0; + stats.obytes -= port->stats.obytes; + port->stats.obytes = 0; + stats.ierrors -= port->stats.ierrors; + port->stats.ierrors = 0; + stats.oerrors -= port->stats.oerrors; + port->stats.oerrors = 0; + stats.rx_nombuf -= port->stats.rx_nombuf; + port->stats.rx_nombuf = 0; + stats.fdirmatch -= port->stats.fdirmatch; + port->stats.rx_nombuf = 0; + stats.fdirmiss -= port->stats.fdirmiss; + port->stats.rx_nombuf = 0; + + total_recv += stats.ipackets; + total_xmit += stats.opackets; + total_rx_dropped += stats.ierrors; + total_tx_dropped += port->tx_dropped; + total_rx_nombuf += stats.rx_nombuf; + + fwd_port_stats_display(pt_id, &stats); + } + printf("\n %s Accumulated forward statistics for all ports" + "%s\n", + acc_stats_border, acc_stats_border); + printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " + "%-"PRIu64"\n" + " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " + "%-"PRIu64"\n", + total_recv, total_rx_dropped, total_recv + total_rx_dropped, + total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); + if (total_rx_nombuf > 0) + printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); + printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" + "%s\n", + acc_stats_border, acc_stats_border); +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + if (total_recv > 0) + printf("\n CPU cycles/packet=%u (total cycles=" + "%"PRIu64" / total RX packets=%"PRIu64")\n", + (unsigned int)(fwd_cycles / total_recv), + fwd_cycles, total_recv); +#endif + printf("\nDone.\n"); + test_done = 1; +} + +void +pmd_test_exit(void) +{ + portid_t pt_id; + + for (pt_id = 0; pt_id < nb_ports; pt_id++) { + printf("Stopping port %d...", pt_id); + fflush(stdout); + rte_eth_dev_close(pt_id); + printf("done\n"); + } + printf("bye...\n"); +} + +typedef void (*cmd_func_t)(void); +struct pmd_test_command { + const char *cmd_name; + cmd_func_t cmd_func; +}; + +#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0])) + +static void +fatal_init_error(const char *func_name, uint8_t port_id, int diag) +{ + rte_panic("%s(port_id=%d) failed - diag=%d\n", + func_name, port_id, diag); +} + +static void +init_ports(void) +{ + struct rte_eth_link link; + struct rte_eth_conf port_conf = { + .intr_conf = { + .lsc = 0, + }, + }; + struct rte_eth_rxconf rx_conf; + struct rte_eth_txconf tx_conf; + struct rte_port *port; + unsigned int sock_id; + portid_t pi; + queueid_t qi; + int diag; + + port_conf.rxmode = rx_mode; + port_conf.fdir_conf = fdir_conf; + + if (nb_rxq > 0) { /* configure RSS */ + port_conf.rx_adv_conf.rss_conf.rss_key = NULL; + /* use default hash key */ + 
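+		/*
+		 * A NULL rss_key asks the PMD to program its built-in default
+		 * RSS hash key; rss_hf only selects which packet fields feed
+		 * the hash (for illustration, something like
+		 * (ETH_RSS_IPV4 | ETH_RSS_IPV6) would hash on IP addresses
+		 * only - the exact flag names depend on rte_ethdev.h).
+		 */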
port_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf; + } else + port_conf.rx_adv_conf.rss_conf.rss_hf = 0; + rx_conf.rx_thresh = rx_thresh; + rx_conf.rx_free_thresh = rx_free_thresh; + tx_conf.tx_thresh = tx_thresh; + tx_conf.tx_rs_thresh = tx_rs_thresh; + tx_conf.tx_free_thresh = tx_free_thresh; + + for (pi = 0; pi < nb_ports; pi++) { + port = &ports[pi]; + memcpy(&port->dev_conf, &port_conf, sizeof(port_conf)); + sock_id = port->socket_id; + printf("Initializing port %d... ", pi); + fflush(stdout); + diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, &port_conf); + if (diag != 0) { + fatal_init_error("rte_eth_dev_configure", pi, diag); + /* NOT REACHED */ + } + rte_eth_macaddr_get(pi, &port->eth_addr); + for (qi = 0; qi < nb_txq; qi++) { + diag = rte_eth_tx_queue_setup(pi, qi, nb_txd, + sock_id, + &tx_conf); + if (diag != 0) { + fatal_init_error("rte_eth_tx_queue_setup", + pi, diag); + /* NOT REACHED */ + } + } + for (qi = 0; qi < nb_rxq; qi++) { + diag = rte_eth_rx_queue_setup(pi, qi, nb_rxd, sock_id, + &rx_conf, + mbuf_pool_find(sock_id)); + if (diag != 0) { + fatal_init_error("rte_eth_rx_queue_setup", + pi , diag); + /* NOT REACHED */ + } + } + + /* Start device */ + diag = rte_eth_dev_start(pi); + if (diag != 0) { + fatal_init_error("rte_eth_dev_start", pi, diag); + /* NOT REACHED */ + } + printf("done: "); + rte_eth_link_get(pi, &link); + if (link.link_status) { + printf(" Link Up - speed %u Mbps - %s\n", + (unsigned) link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex\n")); + } else { + printf(" Link Down\n"); + } + + /* + * If enabled, put device in promiscuous mode. + * This allows the PMD test in IO forwarding mode to forward + * packets to itself through 2 cross-connected ports of the + * target machine. 
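+	 * Without promiscuous mode the receiving port would normally drop
+	 * such frames, because the destination MAC address chosen by the
+	 * transmitting side (the configured peer address) does not have to
+	 * match the MAC address of the port that receives them.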
+ */ + if (promiscuous_on) + rte_eth_promiscuous_enable(pi); + } +} + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define main _main +#endif + +int +main(int argc, char** argv) +{ + int diag; + + diag = rte_eal_init(argc, argv); + if (diag < 0) + rte_panic("Cannot init EAL\n"); + +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init()) + rte_panic("Cannot init igb PMD\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init()) + rte_panic("Cannot init ixgbe PMD\n"); + + if (rte_ixgbevf_pmd_init()) + rte_panic("Cannot init ixgbevf PMD\n"); +#endif + + if (rte_eal_pci_probe()) + rte_panic("Cannot probe PCI\n"); + + nb_ports = (portid_t) rte_eth_dev_count(); + if (nb_ports == 0) + rte_exit(EXIT_FAILURE, "No probed ethernet devices - check that " + "CONFIG_RTE_LIBRTE_IGB_PMD=y and that " + "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your " + "configuration file\n"); + + set_def_fwd_config(); + if (nb_lcores == 0) + rte_panic("Empty set of forwarding logical cores - check the " + "core mask supplied in the command parameters\n"); + + argc -= diag; + argv += diag; + if (argc > 1) + launch_args_parse(argc, argv); + + if (nb_rxq > nb_txq) + printf("Warning: nb_rxq=%d enables RSS configuration, " + "but nb_txq=%d will prevent to fully test it.\n", + nb_rxq, nb_txq); + + init_config(); + + init_ports(); + + if (interactive == 1) + prompt(); + else { + char c; + int rc; + + printf("No commandline core given, start packet forwarding\n"); + start_packet_forwarding(0); + printf("Press enter to exit\n"); + rc = read(0, &c, 1); + if (rc < 0) + return 1; + } + + return 0; +} diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h new file mode 100644 index 0000000000..cc4a0fddce --- /dev/null +++ b/app/test-pmd/testpmd.h @@ -0,0 +1,413 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _TESTPMD_H_ +#define _TESTPMD_H_ + +/* icc on baremetal gives us troubles with function named 'main' */ +#ifdef RTE_EXEC_ENV_BAREMETAL +#define main _main +int main(int argc, char **argv); +#endif + +/* + * Default size of the mbuf data buffer to receive standard 1518-byte + * Ethernet frames in a mono-segment memory buffer. + */ +#define DEFAULT_MBUF_DATA_SIZE 2048 /**< Default size of mbuf data buffer. */ + +/* + * The maximum number of segments per packet is used when creating + * scattered transmit packets composed of a list of mbufs. + */ +#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */ + +#define MAX_PKT_BURST 512 +#define DEF_PKT_BURST 16 + +#define CACHE_LINE_SIZE_ROUNDUP(size) \ + (CACHE_LINE_SIZE * ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE)) + +typedef uint8_t lcoreid_t; +typedef uint8_t portid_t; +typedef uint16_t queueid_t; +typedef uint16_t streamid_t; + +#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1) + +enum { + PORT_TOPOLOGY_PAIRED, + PORT_TOPOLOGY_CHAINED +}; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS +/** + * The data structure associated with RX and TX packet burst statistics + * that are recorded for each forwarding stream. + */ +struct pkt_burst_stats { + unsigned int pkt_burst_spread[MAX_PKT_BURST]; +}; +#endif + +/** + * The data structure associated with a forwarding stream between a receive + * port/queue and a transmit port/queue. + */ +struct fwd_stream { + /* "read-only" data */ + portid_t rx_port; /**< port to poll for received packets */ + queueid_t rx_queue; /**< RX queue to poll on "rx_port" */ + portid_t tx_port; /**< forwarding port of received packets */ + queueid_t tx_queue; /**< TX queue to send forwarded packets */ + streamid_t peer_addr; /**< index of peer ethernet address of packets */ + + /* "read-write" results */ + unsigned int rx_packets; /**< received packets */ + unsigned int tx_packets; /**< received packets transmitted */ + unsigned int fwd_dropped; /**< received packets not forwarded */ + unsigned int rx_bad_ip_csum ; /**< received packets has bad ip checksum */ + unsigned int rx_bad_l4_csum ; /**< received packets has bad l4 checksum */ +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t core_cycles; /**< used for RX and TX processing */ +#endif +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + struct pkt_burst_stats rx_burst_stats; + struct pkt_burst_stats tx_burst_stats; +#endif +}; + +/** + * The data structure associated with each port. + * tx_ol_flags is slightly different from ol_flags of rte_mbuf. + * Bit 0: Insert IP checksum + * Bit 1: Insert UDP checksum + * Bit 2: Insert TCP checksum + * Bit 3: Insert SCTP checksum + * Bit 11: Insert VLAN Label + */ +struct rte_port { + struct rte_eth_dev_info dev_info; /**< PCI info + driver name */ + struct rte_eth_conf dev_conf; /**< Port configuration. */ + struct ether_addr eth_addr; /**< Port ethernet address */ + struct rte_eth_stats stats; /**< Last port statistics */ + uint64_t tx_dropped; /**< If no descriptor in TX ring */ + struct fwd_stream *rx_stream; /**< Port RX stream, if unique */ + struct fwd_stream *tx_stream; /**< Port TX stream, if unique */ + unsigned int socket_id; /**< For NUMA support */ + uint16_t tx_ol_flags;/**< Offload Flags of TX packets. */ + uint16_t tx_vlan_id; /**< Tag Id. in TX VLAN packets. 
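+				   Normally only meaningful when the VLAN
+				   insertion bit (bit 11) of tx_ol_flags is
+				   set; e.g. tx_ol_flags = 0x0803 requests IP
+				   checksum, UDP checksum and VLAN tag
+				   insertion (see the bit layout above).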
*/ + void *fwd_ctx; /**< Forwarding mode context */ + uint64_t rx_bad_ip_csum; /**< rx pkts with bad ip checksum */ + uint64_t rx_bad_l4_csum; /**< rx pkts with bad l4 checksum */ +}; + +/** + * The data structure associated with each forwarding logical core. + * The logical cores are internally numbered by a core index from 0 to + * the maximum number of logical cores - 1. + * The system CPU identifier of all logical cores are setup in a global + * CPU id. configuration table. + */ +struct fwd_lcore { + struct rte_mempool *mbp; /**< The mbuf pool to use by this core */ + streamid_t stream_idx; /**< index of 1st stream in "fwd_streams" */ + streamid_t stream_nb; /**< number of streams in "fwd_streams" */ + lcoreid_t cpuid_idx; /**< index of logical core in CPU id table */ + queueid_t tx_queue; /**< TX queue to send forwarded packets */ + volatile char stopped; /**< stop forwarding when set */ +}; + +/* + * Forwarding mode operations: + * - IO forwarding mode (default mode) + * Forwards packets unchanged. + * + * - MAC forwarding mode + * Set the source and the destination Ethernet addresses of packets + * before forwarding them. + * + * - IEEE1588 forwarding mode + * Check that received IEEE1588 Precise Time Protocol (PTP) packets are + * filtered and timestamped by the hardware. + * Forwards packets unchanged on the same port. + * Check that sent IEEE1588 PTP packets are timestamped by the hardware. + */ +typedef void (*port_fwd_begin_t)(portid_t pi); +typedef void (*port_fwd_end_t)(portid_t pi); +typedef void (*packet_fwd_t)(struct fwd_stream *fs); + +struct fwd_engine { + const char *fwd_mode_name; /**< Forwarding mode name. */ + port_fwd_begin_t port_fwd_begin; /**< NULL if nothing special to do. */ + port_fwd_end_t port_fwd_end; /**< NULL if nothing special to do. */ + packet_fwd_t packet_fwd; /**< Mandatory. */ +}; + +extern struct fwd_engine io_fwd_engine; +extern struct fwd_engine mac_fwd_engine; +extern struct fwd_engine rx_only_engine; +extern struct fwd_engine tx_only_engine; +extern struct fwd_engine csum_fwd_engine; +#ifdef RTE_LIBRTE_IEEE1588 +extern struct fwd_engine ieee1588_fwd_engine; +#endif + +extern struct fwd_engine * fwd_engines[]; /**< NULL terminated array. */ + +/** + * Forwarding Configuration + * + */ +struct fwd_config { + struct fwd_engine *fwd_eng; /**< Packet forwarding mode. */ + streamid_t nb_fwd_streams; /**< Nb. of forward streams to process. */ + lcoreid_t nb_fwd_lcores; /**< Nb. of logical cores to launch. */ + portid_t nb_fwd_ports; /**< Nb. of ports involved. */ +}; + +/* globals used for configuration */ +extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */ +extern uint8_t interactive; +extern uint8_t numa_support; /**< set by "--numa" parameter */ +extern uint16_t port_topology; /**< set by "--port-topology" parameter */ + +/* + * Configuration of logical cores: + * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores + */ +extern lcoreid_t nb_lcores; /**< Number of logical cores probed at init time. */ +extern lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */ +extern lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */ +extern unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; + +/* + * Configuration of Ethernet ports: + * nb_fwd_ports <= nb_cfg_ports <= nb_ports + */ +extern portid_t nb_ports; /**< Number of ethernet ports probed at init time. */ +extern portid_t nb_cfg_ports; /**< Number of configured ports. */ +extern portid_t nb_fwd_ports; /**< Number of forwarding ports. 
*/ +extern portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; +extern struct rte_port *ports; + +extern struct rte_eth_rxmode rx_mode; +extern uint16_t rss_hf; + +extern queueid_t nb_rxq; +extern queueid_t nb_txq; + +extern uint16_t nb_rxd; +extern uint16_t nb_txd; + +extern uint16_t rx_free_thresh; +extern uint16_t tx_free_thresh; +extern uint16_t tx_rs_thresh; + +extern uint16_t mbuf_data_size; /**< Mbuf data space size. */ + +extern struct rte_fdir_conf fdir_conf; + +/* + * Configuration of packet segments used by the "txonly" processing engine. + */ +#define TXONLY_DEF_PACKET_LEN 64 +extern uint16_t tx_pkt_length; /**< Length of TXONLY packet */ +extern uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */ +extern uint8_t tx_pkt_nb_segs; /**< Number of segments in TX packets */ + +extern uint16_t nb_pkt_per_burst; +extern uint16_t mb_mempool_cache; +extern struct rte_eth_thresh rx_thresh; +extern struct rte_eth_thresh tx_thresh; + +extern struct fwd_config cur_fwd_config; +extern struct fwd_engine *cur_fwd_eng; +extern struct fwd_lcore **fwd_lcores; +extern struct fwd_stream **fwd_streams; + +extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */ +extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; + +static inline unsigned int +lcore_num(void) +{ + unsigned int i; + + for (i = 0; i < RTE_MAX_LCORE; ++i) + if (fwd_lcores_cpuids[i] == rte_lcore_id()) + return i; + + rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n"); +} + +static inline struct fwd_lcore * +current_fwd_lcore(void) +{ + return fwd_lcores[lcore_num()]; +} + +/* Mbuf Pools */ +static inline void +mbuf_poolname_build(unsigned int sock_id, char* mp_name, int name_size) +{ + rte_snprintf(mp_name, name_size, "mbuf_pool_socket_%u", sock_id); +} + +static inline struct rte_mempool * +mbuf_pool_find(unsigned int sock_id) +{ + char pool_name[RTE_MEMPOOL_NAMESIZE]; + + mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name)); + return (rte_mempool_lookup((const char *)pool_name)); +} + +/** + * Read/Write operations on a PCI register of a port. 
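+ *
+ * The access goes through the PCI BAR that the EAL mapped into the process
+ * (dev_info.pci_dev->mem_resource.addr) at byte offset "reg_off", converting
+ * between the little-endian register layout and CPU byte order. Usage sketch
+ * (0x0008 is a made-up offset, shown for illustration only):
+ *
+ *	uint32_t v = port_id_pci_reg_read(0, 0x0008);
+ *	port_id_pci_reg_write(0, 0x0008, v | 0x1);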
+ */ +static inline uint32_t +port_pci_reg_read(struct rte_port *port, uint32_t reg_off) +{ + void *reg_addr; + uint32_t reg_v; + + reg_addr = (void *)((char *)port->dev_info.pci_dev->mem_resource.addr + + reg_off); + reg_v = *((volatile uint32_t *)reg_addr); + return rte_le_to_cpu_32(reg_v); +} + +#define port_id_pci_reg_read(pt_id, reg_off) \ + port_pci_reg_read(&ports[(pt_id)], (reg_off)) + +static inline void +port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v) +{ + void *reg_addr; + + reg_addr = (void *)((char *)port->dev_info.pci_dev->mem_resource.addr + + reg_off); + *((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v); +} + +#define port_id_pci_reg_write(pt_id, reg_off, reg_value) \ + port_pci_reg_write(&ports[(pt_id)], (reg_off), (reg_value)) + +/* Prototypes */ +void launch_args_parse(int argc, char** argv); +void prompt(void); +void nic_stats_display(portid_t port_id); +void nic_stats_clear(portid_t port_id); +void port_infos_display(portid_t port_id); +void fwd_lcores_config_display(void); +void fwd_config_display(void); +void rxtx_config_display(void); +void fwd_config_setup(void); +void set_def_fwd_config(void); + +void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos); +void port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos, + uint8_t bit_v); +void port_reg_bit_field_display(portid_t port_id, uint32_t reg_off, + uint8_t bit1_pos, uint8_t bit2_pos); +void port_reg_bit_field_set(portid_t port_id, uint32_t reg_off, + uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value); +void port_reg_display(portid_t port_id, uint32_t reg_off); +void port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t value); + +void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id); +void tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id); + +void set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc); +void set_fwd_lcores_mask(uint64_t lcoremask); +void set_fwd_lcores_number(uint16_t nb_lc); + +void set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt); +void set_fwd_ports_mask(uint64_t portmask); +void set_fwd_ports_number(uint16_t nb_pt); + +void rx_vlan_filter_set(portid_t port_id, uint16_t vlan_id, int on); +void rx_vlan_all_filter_set(portid_t port_id, int on); +void tx_vlan_set(portid_t port_id, uint16_t vlan_id); +void tx_vlan_reset(portid_t port_id); + +void tx_cksum_set(portid_t port_id, uint8_t cksum_mask); + +void set_verbose_level(uint16_t vb_level); +void set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs); +void set_nb_pkt_per_burst(uint16_t pkt_burst); +void set_pkt_forwarding_mode(const char *fwd_mode); +void start_packet_forwarding(int with_tx_first); +void stop_packet_forwarding(void); +void pmd_test_exit(void); + +void fdir_add_signature_filter(portid_t port_id, uint8_t queue_id, + struct rte_fdir_filter *fdir_filter); +void fdir_update_signature_filter(portid_t port_id, uint8_t queue_id, + struct rte_fdir_filter *fdir_filter); +void fdir_remove_signature_filter(portid_t port_id, + struct rte_fdir_filter *fdir_filter); +void fdir_get_infos(portid_t port_id); +void fdir_add_perfect_filter(portid_t port_id, uint16_t soft_id, + uint8_t queue_id, uint8_t drop, + struct rte_fdir_filter *fdir_filter); +void fdir_update_perfect_filter(portid_t port_id, uint16_t soft_id, + uint8_t queue_id, uint8_t drop, + struct rte_fdir_filter *fdir_filter); +void fdir_remove_perfect_filter(portid_t port_id, uint16_t soft_id, + struct rte_fdir_filter 
*fdir_filter); +void fdir_set_masks(portid_t port_id, struct rte_fdir_masks *fdir_masks); + +/* + * Work-around of a compilation error with ICC on invocations of the + * rte_be_to_cpu_16() function. + */ +#ifdef __GCC__ +#define RTE_BE_TO_CPU_16(be_16_v) rte_be_to_cpu_16((be_16_v)) +#define RTE_CPU_TO_BE_16(cpu_16_v) rte_cpu_to_be_16((cpu_16_v)) +#else +#ifdef __big_endian__ +#define RTE_BE_TO_CPU_16(be_16_v) (be_16_v) +#define RTE_CPU_TO_BE_16(cpu_16_v) (cpu_16_v) +#else +#define RTE_BE_TO_CPU_16(be_16_v) \ + (uint16_t) ((((be_16_v) & 0xFF) << 8) | ((be_16_v) >> 8)) +#define RTE_CPU_TO_BE_16(cpu_16_v) \ + (uint16_t) ((((cpu_16_v) & 0xFF) << 8) | ((cpu_16_v) >> 8)) +#endif +#endif /* __GCC__ */ + +#endif /* _TESTPMD_H_ */ diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c new file mode 100644 index 0000000000..bf0a3e2740 --- /dev/null +++ b/app/test-pmd/txonly.c @@ -0,0 +1,317 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "testpmd.h" + +#define UDP_SRC_PORT 1024 +#define UDP_DST_PORT 1024 + +#define IP_SRC_ADDR ((192 << 24) | (168 << 16) | (0 << 8) | 1) +#define IP_DST_ADDR ((192 << 24) | (168 << 16) | (0 << 8) | 2) + +#define IP_DEFTTL 64 /* from RFC 1340. */ +#define IP_VERSION 0x40 +#define IP_HDRLEN 0x05 /* default IP header length == five 32-bits words. */ +#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN) + +static struct ipv4_hdr pkt_ip_hdr; /**< IP header of transmitted packets. */ +static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. 
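+					       Both static headers are filled
+					       in once by tx_only_begin() and
+					       then copied unchanged into the
+					       head of every generated packet.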
*/ + +static inline struct rte_mbuf * +tx_mbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + void *mb; + + if (rte_mempool_get(mp, &mb) < 0) + return NULL; + m = (struct rte_mbuf *)mb; + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + return m; +} + +static void +copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt, + unsigned offset) +{ + struct rte_mbuf *seg; + void *seg_buf; + unsigned copy_len; + + seg = pkt; + while (offset >= seg->pkt.data_len) { + offset -= seg->pkt.data_len; + seg = seg->pkt.next; + } + copy_len = seg->pkt.data_len - offset; + seg_buf = ((char *) seg->pkt.data + offset); + while (len > copy_len) { + rte_memcpy(seg_buf, buf, (size_t) copy_len); + len -= copy_len; + buf = ((char*) buf + copy_len); + seg = seg->pkt.next; + seg_buf = seg->pkt.data; + } + rte_memcpy(seg_buf, buf, (size_t) len); +} + +static inline void +copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset) +{ + if (offset + len <= pkt->pkt.data_len) { + rte_memcpy(((char *) pkt->pkt.data + offset), buf, (size_t) len); + return; + } + copy_buf_to_pkt_segs(buf, len, pkt, offset); +} + +static void +setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr, + struct udp_hdr *udp_hdr, + uint16_t pkt_data_len) +{ + uint16_t *ptr16; + uint32_t ip_cksum; + uint16_t pkt_len; + + /* + * Initialize UDP header. + */ + pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr)); + udp_hdr->src_port = rte_cpu_to_be_16(UDP_SRC_PORT); + udp_hdr->dst_port = rte_cpu_to_be_16(UDP_DST_PORT); + udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len); + udp_hdr->dgram_cksum = 0; /* No UDP checksum. */ + + /* + * Initialize IP header. + */ + pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr)); + ip_hdr->version_ihl = IP_VHL_DEF; + ip_hdr->type_of_service = 0; + ip_hdr->fragment_offset = 0; + ip_hdr->time_to_live = IP_DEFTTL; + ip_hdr->next_proto_id = IPPROTO_UDP; + ip_hdr->packet_id = 0; + ip_hdr->total_length = RTE_CPU_TO_BE_16(pkt_len); + ip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR); + ip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR); + + /* + * Compute IP header checksum. + */ + ptr16 = (uint16_t*) ip_hdr; + ip_cksum = 0; + ip_cksum += ptr16[0]; ip_cksum += ptr16[1]; + ip_cksum += ptr16[2]; ip_cksum += ptr16[3]; + ip_cksum += ptr16[4]; + ip_cksum += ptr16[6]; ip_cksum += ptr16[7]; + ip_cksum += ptr16[8]; ip_cksum += ptr16[9]; + + /* + * Reduce 32 bit checksum to 16 bits and complement it. + */ + ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) + + (ip_cksum & 0x0000FFFF); + if (ip_cksum > 65535) + ip_cksum -= 65535; + ip_cksum = (~ip_cksum) & 0x0000FFFF; + if (ip_cksum == 0) + ip_cksum = 0xFFFF; + ip_hdr->hdr_checksum = (uint16_t) ip_cksum; +} + +/* + * Transmit a burst of multi-segments packets. 
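+ *
+ * Informal outline of the function below: allocate up to nb_pkt_per_burst
+ * mbuf chains of tx_pkt_nb_segs segments each, build an Ethernet header from
+ * the port and peer MAC addresses, copy it together with the pre-built
+ * IP/UDP headers into the first segment(s), transmit the burst with
+ * rte_eth_tx_burst() and free whatever the driver did not accept.
+ * (In setup_pkt_udp_ip_headers() above, ptr16[5] is skipped on purpose: it
+ * is the header checksum field itself, which must count as zero while the
+ * one's-complement sum is being computed.)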
+ */ +static void +pkt_burst_transmit(struct fwd_stream *fs) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mbuf *pkt; + struct rte_mbuf *pkt_seg; + struct rte_mempool *mbp; + struct ether_hdr eth_hdr; + uint16_t nb_tx; + uint16_t nb_pkt; + uint16_t vlan_tci; + uint16_t ol_flags; + uint8_t i; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t start_tsc; + uint64_t end_tsc; + uint64_t core_cycles; +#endif + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + start_tsc = rte_rdtsc(); +#endif + + mbp = current_fwd_lcore()->mbp; + vlan_tci = ports[fs->tx_port].tx_vlan_id; + ol_flags = ports[fs->tx_port].tx_ol_flags; + for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) { + pkt = tx_mbuf_alloc(mbp); + if (pkt == NULL) { + nomore_mbuf: + if (nb_pkt == 0) + return; + break; + } + pkt->pkt.data_len = tx_pkt_seg_lengths[0]; + pkt_seg = pkt; + for (i = 1; i < tx_pkt_nb_segs; i++) { + pkt_seg->pkt.next = tx_mbuf_alloc(mbp); + if (pkt_seg->pkt.next == NULL) { + rte_pktmbuf_free(pkt); + goto nomore_mbuf; + } + pkt_seg = pkt_seg->pkt.next; + pkt_seg->pkt.data_len = tx_pkt_seg_lengths[i]; + } + pkt_seg->pkt.next = NULL; /* Last segment of packet. */ + + /* + * Initialize Ethernet header. + */ + ether_addr_copy(&peer_eth_addrs[fs->peer_addr],ð_hdr.d_addr); + ether_addr_copy(&ports[fs->tx_port].eth_addr, ð_hdr.s_addr); + eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + + /* + * Copy headers in first packet segment(s). + */ + copy_buf_to_pkt(ð_hdr, sizeof(eth_hdr), pkt, 0); + copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt, + sizeof(struct ether_hdr)); + copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt, + sizeof(struct ether_hdr) + + sizeof(struct ipv4_hdr)); + + /* + * Complete first mbuf of packet and append it to the + * burst of packets to be transmitted. + */ + pkt->pkt.nb_segs = tx_pkt_nb_segs; + pkt->pkt.pkt_len = tx_pkt_length; + pkt->ol_flags = ol_flags; + pkt->pkt.vlan_tci = vlan_tci; + pkt->pkt.l2_len = sizeof(struct ether_hdr); + pkt->pkt.l3_len = sizeof(struct ipv4_hdr); + pkts_burst[nb_pkt] = pkt; + } + nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt); + fs->tx_packets += nb_tx; + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + fs->tx_burst_stats.pkt_burst_spread[nb_tx]++; +#endif + if (unlikely(nb_tx < nb_pkt)) { + if (verbose_level > 0 && fs->fwd_dropped == 0) + printf("port %d tx_queue %d - drop " + "(nb_pkt:%u - nb_tx:%u)=%u packets\n", + fs->tx_port, fs->tx_queue, + (unsigned) nb_pkt, (unsigned) nb_tx, + (unsigned) (nb_pkt - nb_tx)); + fs->fwd_dropped += (nb_pkt - nb_tx); + do { + rte_pktmbuf_free(pkts_burst[nb_tx]); + } while (++nb_tx < nb_pkt); + } + +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + end_tsc = rte_rdtsc(); + core_cycles = (end_tsc - start_tsc); + fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles); +#endif +} + +static void +tx_only_begin(__attribute__((unused)) portid_t pi) +{ + uint16_t pkt_data_len; + + pkt_data_len = (uint16_t) (tx_pkt_length - (sizeof(struct ether_hdr) + + sizeof(struct ipv4_hdr) + + sizeof(struct udp_hdr))); + setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len); +} + +struct fwd_engine tx_only_engine = { + .fwd_mode_name = "txonly", + .port_fwd_begin = tx_only_begin, + .port_fwd_end = NULL, + .packet_fwd = pkt_burst_transmit, +}; diff --git a/app/test/Makefile b/app/test/Makefile new file mode 100644 index 0000000000..80d210d665 --- /dev/null +++ b/app/test/Makefile @@ -0,0 +1,82 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +APP = test + +# +# all sources are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_APP_TEST) := commands.c +SRCS-$(CONFIG_RTE_APP_TEST) += test.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_pci.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_prefetch.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_byteorder.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_per_lcore.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_atomic.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_malloc.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_cycles.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_spinlock.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_memory.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_memzone.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_ring.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_rwlock.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_timer.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_mempool.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_mbuf.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_logs.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_memcpy.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_hash.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_lpm.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_debug.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_errno.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_tailq.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_string_fns.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_mp_secondary.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_cpuflags.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_eal_flags.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_alarm.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_interrupts.c +SRCS-$(CONFIG_RTE_APP_TEST) += test_version.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# this application needs libraries first +DEPDIRS-$(CONFIG_RTE_APP_TEST) += lib + +include $(RTE_SDK)/mk/rte.app.mk diff --git a/app/test/autotest.py b/app/test/autotest.py new file mode 100755 index 0000000000..2609142965 --- /dev/null +++ b/app/test/autotest.py @@ -0,0 +1,664 @@ +#!/usr/bin/python + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# Script that uses qemu controlled by python-pexpect to check that +# all autotests are working in the baremetal environment. + +import sys, pexpect, time, os, re + +directory = sys.argv[2] +target = sys.argv[3] +log_file = "%s.txt"%(target) + +if "baremetal" in target: + cmdline = "qemu-system-x86_64 -cdrom %s.iso -boot d "%(sys.argv[1]) + cmdline += "-m 2000 -smp 4 -nographic -net nic,model=e1000" + platform = "QEMU x86_64" +else: + cmdline = "%s -c f -n 4"%(sys.argv[1]) + try: + platform = open("/root/rte_platform_model.txt").read() + except: + platform = "unknown" + +print cmdline + +report_hdr=""".. 
+ +""" + +test_whitelist=None +test_blacklist=None + +class SubTest: + "Defines a subtest" + def __init__(self, title, function, command=None, timeout=10, genreport=None): + self.title = title + self.function = function + self.command = command + self.timeout = timeout + self.genreport = genreport + +class AutoTest: + """This class contains all methods needed to launch several + automatic tests, archive test results, log, and generate a nice + test report in restructured text""" + + title = "new" + mainlog = None + logbuf = None + literal = 0 + test_list = [] + report_list = [] + child = None + + def __init__(self, pexpectchild, filename, mode): + "Init the Autotest class" + self.mainlog = file(filename, mode) + self.child = pexpectchild + pexpectchild.logfile = self + def register(self, filename, title, subtest_list): + "Register a test with a list of subtests" + test = {} + test["filename"] = filename + test["title"] = title + test["subtest_list"] = subtest_list + self.test_list.append(test) + + def start(self): + "start the tests, and fill the internal report_list field" + for t in self.test_list: + report = {} + report["date"] = time.asctime() + report["title"] = t["title"] + report["filename"] = t["filename"] + report["subreport_list"] = [] + report["fails"] = 0 + report["success"] = 0 + report["subreport_list"] = [] + for st in t["subtest_list"]: + if test_whitelist is not None and st.title not in test_whitelist: + continue + if test_blacklist is not None and st.title in test_blacklist: + continue + subreport = {} + self.reportbuf = "" + subreport["title"] = st.title + subreport["func"] = st.function + subreport["command"] = st.command + subreport["timeout"] = st.timeout + subreport["genreport"] = st.genreport + + # launch subtest + print "%s (%s): "%(subreport["title"], subreport["command"]), + sys.stdout.flush() + start = time.time() + res = subreport["func"](self.child, + command = subreport["command"], + timeout = subreport["timeout"]) + t = int(time.time() - start) + + subreport["time"] = "%dmn%d"%(t/60, t%60) + subreport["result"] = res[0] # 0 or -1 + subreport["result_str"] = res[1] # cause of fail + subreport["logs"] = self.reportbuf + print "%s [%s]"%(subreport["result_str"], subreport["time"]) + if subreport["result"] == 0: + report["success"] += 1 + else: + report["fails"] += 1 + report["subreport_list"].append(subreport) + self.report_list.append(report) + + def gen_report(self): + for report in self.report_list: + # main report header and stats + self.literal = 0 + reportlog = file(report["filename"], "w") + reportlog.write(report_hdr) + reportlog.write(report["title"] + "\n") + reportlog.write(re.sub(".", "=", report["title"]) + "\n\n") + reportlog.write("Autogenerated test report:\n\n" ) + reportlog.write("- date: **%s**\n"%(report["date"])) + reportlog.write("- target: **%s**\n"%(target)) + reportlog.write("- success: **%d**\n"%(report["success"])) + reportlog.write("- fails: **%d**\n"%(report["fails"])) + reportlog.write("- platform: **%s**\n\n"%(platform)) + + # summary + reportlog.write(".. 
csv-table:: Test results summary\n") + reportlog.write(' :header: "Name", "Result"\n\n') + for subreport in report["subreport_list"]: + if subreport["result"] == 0: + res_str = "Success" + else: + res_str = "Failure" + reportlog.write(' "%s", "%s"\n'%(subreport["title"], res_str)) + reportlog.write('\n') + + # subreports + for subreport in report["subreport_list"]: + # print subtitle + reportlog.write(subreport["title"] + "\n") + reportlog.write(re.sub(".", "-", subreport["title"]) + "\n\n") + # print logs + reportlog.write("::\n \n ") + s = subreport["logs"].replace("\n", "\n ") + reportlog.write(s) + # print result + reportlog.write("\n\n") + reportlog.write("**" + subreport["result_str"] + "**\n\n") + # custom genreport + if subreport["genreport"] != None: + s = subreport["genreport"]() + reportlog.write(s) + + reportlog.close() + + # displayed on console + print + print "-------------------------" + print + if report["fails"] == 0: + print "All test OK" + else: + print "%s test(s) failed"%(report["fails"]) + + # file API, to store logs from pexpect + def write(self, buf): + s = buf[:] + s = s.replace("\r", "") + self.mainlog.write(s) + self.reportbuf += s + def flush(self): + self.mainlog.flush() + def close(self): + self.mainlog.close() + + +# Try to match prompt: return 0 on success, else return -1 +def wait_prompt(child): + for i in range(3): + index = child.expect(["RTE>>", pexpect.TIMEOUT], timeout = 1) + child.sendline("") + if index == 0: + return 0 + print "Cannot find prompt" + return -1 + +# Try to match prompt after boot: return 0 on success, else return -1 +def wait_boot(child): + index = child.expect(["RTE>>", pexpect.TIMEOUT], + timeout = 120) + if index == 0: + return 0 + if (wait_prompt(child) == -1): + print "Target did not boot, failed" + return -1 + return 0 + +# quit RTE +def quit(child): + if wait_boot(child) != 0: + return -1, "Cannot find prompt" + child.sendline("quit") + return 0, "Success" + +# Default function to launch an autotest that does not need to +# interact with the user. Basically, this function calls the autotest +# function through command line interface, then check that it displays +# "Test OK" or "Test Failed". +def default_autotest(child, command, timeout=10): + if wait_prompt(child) != 0: + return -1, "Failed: cannot find prompt" + child.sendline(command) + index = child.expect(["Test OK", "Test Failed", + pexpect.TIMEOUT], timeout = timeout) + if index == 1: + return -1, "Failed" + elif index == 2: + return -1, "Failed [Timeout]" + return 0, "Success" + +# wait boot +def boot_autotest(child, **kargs): + if wait_boot(child) != 0: + return -1, "Cannot find prompt" + return 0, "Success" + +# Test memory dump. We need to check that at least one memory zone is +# displayed. +def memory_autotest(child, command, **kargs): + if wait_prompt(child) != 0: + return -1, "Failed: cannot find prompt" + child.sendline(command) + regexp = "phys:0x[0-9a-f]*, len:0x([0-9a-f]*), virt:0x[0-9a-f]*, socket_id:[0-9]*" + index = child.expect([regexp, pexpect.TIMEOUT], timeout = 180) + if index != 0: + return -1, "Failed: timeout" + size = int(child.match.groups()[0], 16) + if size <= 0: + return -1, "Failed: bad size" + index = child.expect(["Test OK", "Test Failed", + pexpect.TIMEOUT], timeout = 10) + if index == 1: + return -1, "Failed: C code returned an error" + elif index == 2: + return -1, "Failed: timeout" + return 0, "Success" + +# Test some libc functions including scanf. 
This requires a +# interaction with the user (simulated in expect), so we cannot use +# default_autotest() here. +def string_autotest(child, command, **kargs): + if wait_prompt(child) != 0: + return -1, "Failed: cannot find prompt" + child.sendline(command) + index = child.expect(["Now, test scanf, enter this number", + pexpect.TIMEOUT], timeout = 10) + if index != 0: + return -1, "Failed: timeout" + child.sendline("123456") + index = child.expect(["number=123456", pexpect.TIMEOUT], timeout = 10) + if index != 0: + return -1, "Failed: timeout (2)" + index = child.expect(["Test OK", "Test Failed", + pexpect.TIMEOUT], timeout = 10) + if index != 0: + return -1, "Failed: C code returned an error" + return 0, "Success" + +# Test spinlock. This requires to check the order of displayed lines: +# we cannot use default_autotest() here. +def spinlock_autotest(child, command, **kargs): + i = 0 + ir = 0 + if wait_prompt(child) != 0: + return -1, "Failed: cannot find prompt" + child.sendline(command) + while True: + index = child.expect(["Test OK", + "Test Failed", + "Hello from core ([0-9]*) !", + "Hello from within recursive locks from ([0-9]*) !", + pexpect.TIMEOUT], timeout = 20) + # ok + if index == 0: + break + + # message, check ordering + elif index == 2: + if int(child.match.groups()[0]) < i: + return -1, "Failed: bad order" + i = int(child.match.groups()[0]) + elif index == 3: + if int(child.match.groups()[0]) < ir: + return -1, "Failed: bad order" + ir = int(child.match.groups()[0]) + + # fail + else: + return -1, "Failed: timeout or error" + + return 0, "Success" + + +# Test rwlock. This requires to check the order of displayed lines: +# we cannot use default_autotest() here. +def rwlock_autotest(child, command, **kargs): + i = 0 + if wait_prompt(child) != 0: + return -1, "Failed: cannot find prompt" + child.sendline(command) + while True: + index = child.expect(["Test OK", + "Test Failed", + "Hello from core ([0-9]*) !", + "Global write lock taken on master core ([0-9]*)", + pexpect.TIMEOUT], timeout = 10) + # ok + if index == 0: + if i != 0xffff: + return -1, "Failed: a message is missing" + break + + # message, check ordering + elif index == 2: + if int(child.match.groups()[0]) < i: + return -1, "Failed: bad order" + i = int(child.match.groups()[0]) + + # must be the last message, check ordering + elif index == 3: + i = 0xffff + + # fail + else: + return -1, "Failed: timeout or error" + + return 0, "Success" + +# Test logs. This requires to check the order of displayed lines: +# we cannot use default_autotest() here. 
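+# Illustration only (not called by any test): the ordered autotests below all
+# follow the same "expect these patterns one by one, fail on the first
+# timeout" idea. A minimal generic form of that pattern could look like this.
+def expect_in_order(child, patterns, timeout=10):
+    for pat in patterns:
+        index = child.expect([pat, pexpect.TIMEOUT], timeout = timeout)
+        if index != 0:
+            return -1, "Failed: timeout waiting for %s"%(pat)
+    return 0, "Success"
+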
+def logs_autotest(child, command, **kargs): + i = 0 + if wait_prompt(child) != 0: + return -1, "Failed: cannot find prompt" + child.sendline(command) + + log_list = [ + "TESTAPP1: this is a debug level message", + "TESTAPP1: this is a info level message", + "TESTAPP1: this is a warning level message", + "TESTAPP2: this is a info level message", + "TESTAPP2: this is a warning level message", + "TESTAPP1: this is a debug level message", + "TESTAPP1: this is a debug level message", + "TESTAPP1: this is a info level message", + "TESTAPP1: this is a warning level message", + "TESTAPP2: this is a info level message", + "TESTAPP2: this is a warning level message", + "TESTAPP1: this is a debug level message", + ] + + for log_msg in log_list: + index = child.expect([log_msg, + "Test OK", + "Test Failed", + pexpect.TIMEOUT], timeout = 10) + + # not ok + if index != 0: + return -1, "Failed: timeout or error" + + index = child.expect(["Test OK", + "Test Failed", + pexpect.TIMEOUT], timeout = 10) + + return 0, "Success" + +# Test timers. This requires to check the order of displayed lines: +# we cannot use default_autotest() here. +def timer_autotest(child, command, **kargs): + i = 0 + if wait_prompt(child) != 0: + return -1, "Failed: cannot find prompt" + child.sendline(command) + + index = child.expect(["Start timer stress tests \(30 seconds\)", + "Test Failed", + pexpect.TIMEOUT], timeout = 10) + + # not ok + if index != 0: + return -1, "Failed: timeout or error" + + index = child.expect(["Start timer basic tests \(30 seconds\)", + "Test Failed", + pexpect.TIMEOUT], timeout = 40) + + # not ok + if index != 0: + return -1, "Failed: timeout or error (2)" + + prev_lcore_timer1 = -1 + + lcore_tim0 = -1 + lcore_tim1 = -1 + lcore_tim2 = -1 + lcore_tim3 = -1 + + while True: + index = child.expect(["TESTTIMER: ([0-9]*): callback id=([0-9]*) count=([0-9]*) on core ([0-9]*)", + "Test OK", + "Test Failed", + pexpect.TIMEOUT], timeout = 10) + + if index == 1: + break + + if index != 0: + return -1, "Failed: timeout or error (3)" + + try: + t = int(child.match.groups()[0]) + id = int(child.match.groups()[1]) + cnt = int(child.match.groups()[2]) + lcore = int(child.match.groups()[3]) + except: + return -1, "Failed: cannot parse output" + + # timer0 always expires on the same core when cnt < 20 + if id == 0: + if lcore_tim0 == -1: + lcore_tim0 = lcore + elif lcore != lcore_tim0 and cnt < 20: + return -1, "Failed: lcore != lcore_tim0 (%d, %d)"%(lcore, lcore_tim0) + if cnt > 21: + return -1, "Failed: tim0 cnt > 21" + + # timer1 each time expires on a different core + if id == 1: + if lcore == lcore_tim1: + return -1, "Failed: lcore == lcore_tim1 (%d, %d)"%(lcore, lcore_tim1) + lcore_tim1 = lcore + if cnt > 10: + return -1, "Failed: tim1 cnt > 30" + + # timer0 always expires on the same core + if id == 2: + if lcore_tim2 == -1: + lcore_tim2 = lcore + elif lcore != lcore_tim2: + return -1, "Failed: lcore != lcore_tim2 (%d, %d)"%(lcore, lcore_tim2) + if cnt > 30: + return -1, "Failed: tim2 cnt > 30" + + # timer0 always expires on the same core + if id == 3: + if lcore_tim3 == -1: + lcore_tim3 = lcore + elif lcore != lcore_tim3: + return -1, "Failed: lcore_tim3 changed (%d -> %d)"%(lcore, lcore_tim3) + if cnt > 30: + return -1, "Failed: tim3 cnt > 30" + + # must be 2 different cores + if lcore_tim0 == lcore_tim3: + return -1, "Failed: lcore_tim0 (%d) == lcore_tim3 (%d)"%(lcore_tim0, lcore_tim3) + + return 0, "Success" + +# Ring autotest +def ring_autotest(child, command, timeout=10): + if wait_prompt(child) != 0: + 
return -1, "Failed: cannot find prompt" + child.sendline(command) + index = child.expect(["Test OK", "Test Failed", + pexpect.TIMEOUT], timeout = timeout) + if index != 0: + return -1, "Failed" + + child.sendline("set_watermark test 100") + child.sendline("set_quota test 16") + child.sendline("dump_ring test") + index = child.expect([" watermark=100", + pexpect.TIMEOUT], timeout = 1) + if index != 0: + return -1, "Failed: bad watermark" + + index = child.expect([" bulk_default=16", + pexpect.TIMEOUT], timeout = 1) + if index != 0: + return -1, "Failed: bad quota" + + return 0, "Success" + +def ring_genreport(): + s = "Performance curves\n" + s += "------------------\n\n" + sdk = os.getenv("RTE_SDK") + script = os.path.join(sdk, "app/test/graph_ring.py") + title ='"Autotest %s %s"'%(target, time.asctime()) + filename = target + ".txt" + os.system("/usr/bin/python %s %s %s"%(script, filename, title)) + for f in os.listdir("."): + if not f.startswith("ring"): + continue + if not f.endswith(".svg"): + continue + # skip single producer/consumer + if "_sc" in f: + continue + if "_sp" in f: + continue + f = f[:-4] + ".png" + s += ".. figure:: ../../images/autotests/%s/%s\n"%(target, f) + s += " :width: 50%\n\n" + s += " %s\n\n"%(f) + return s + +def mempool_genreport(): + s = "Performance curves\n" + s += "------------------\n\n" + sdk = os.getenv("RTE_SDK") + script = os.path.join(sdk, "app/test/graph_mempool.py") + title ='"Autotest %s %s"'%(target, time.asctime()) + filename = target + ".txt" + os.system("/usr/bin/python %s %s %s"%(script, filename, title)) + for f in os.listdir("."): + if not f.startswith("mempool"): + continue + if not f.endswith(".svg"): + continue + # skip when n_keep = 128 + if "_128." in f: + continue + f = f[:-4] + ".png" + s += ".. 
figure:: ../../images/autotests/%s/%s\n"%(target, f) + s += " :width: 50%\n\n" + s += " %s\n\n"%(f) + return s + +# +# main +# + +if len(sys.argv) > 4: + testlist=sys.argv[4].split(',') + if testlist[0].startswith('-'): + testlist[0]=testlist[0].lstrip('-') + test_blacklist=testlist + else: + test_whitelist=testlist + +child = pexpect.spawn(cmdline) +autotest = AutoTest(child, log_file,'w') + +# timeout for memcpy and hash test +if "baremetal" in target: + timeout = 60*180 +else: + timeout = 180 + +autotest.register("eal_report.rst", "EAL-%s"%(target), + [ SubTest("Boot", boot_autotest, "boot_autotest"), + SubTest("EAL Flags", default_autotest, "eal_flags_autotest"), + SubTest("Version", default_autotest, "version_autotest"), + SubTest("PCI", default_autotest, "pci_autotest"), + SubTest("Memory", memory_autotest, "memory_autotest"), + SubTest("Lcore launch", default_autotest, "per_lcore_autotest"), + SubTest("Spinlock", spinlock_autotest, "spinlock_autotest"), + SubTest("Rwlock", rwlock_autotest, "rwlock_autotest"), + SubTest("Atomic", default_autotest, "atomic_autotest"), + SubTest("Byte order", default_autotest, "byteorder_autotest"), + SubTest("Prefetch", default_autotest, "prefetch_autotest"), + SubTest("Debug", default_autotest, "debug_autotest"), + SubTest("Cycles", default_autotest, "cycles_autotest"), + SubTest("Logs", logs_autotest, "logs_autotest"), + SubTest("Memzone", default_autotest, "memzone_autotest"), + SubTest("Cpu flags", default_autotest, "cpuflags_autotest"), + SubTest("Memcpy", default_autotest, "memcpy_autotest", timeout), + SubTest("String Functions", default_autotest, "string_autotest"), + SubTest("Alarm", default_autotest, "alarm_autotest", 30), + SubTest("Interrupt", default_autotest, "interrupt_autotest"), + ]) + +autotest.register("ring_report.rst", "Ring-%s"%(target), + [ SubTest("Ring", ring_autotest, "ring_autotest", 30*60, + ring_genreport) + ]) + +if "baremetal" in target: + timeout = 60*60*3 +else: + timeout = 60*30 + +autotest.register("mempool_report.rst", "Mempool-%s"%(target), + [ SubTest("Mempool", default_autotest, "mempool_autotest", + timeout, mempool_genreport) + ]) +autotest.register("mbuf_report.rst", "Mbuf-%s"%(target), + [ SubTest("Mbuf", default_autotest, "mbuf_autotest", timeout=120) + ]) +autotest.register("timer_report.rst", "Timer-%s"%(target), + [ SubTest("Timer", timer_autotest, "timer_autotest") + ]) +autotest.register("malloc_report.rst", "Malloc-%s"%(target), + [ SubTest("Malloc", default_autotest, "malloc_autotest") + ]) + +# only do the hash autotest if supported by the platform +if not (platform.startswith("Intel(R) Core(TM)2 Quad CPU") or + platform.startswith("QEMU")): + autotest.register("hash_report.rst", "Hash-%s"%(target), + [ SubTest("Hash", default_autotest, "hash_autotest", timeout) + ]) + +autotest.register("lpm_report.rst", "LPM-%s"%(target), + [ SubTest("Lpm", default_autotest, "lpm_autotest", timeout) + ]) +autotest.register("eal2_report.rst", "EAL2-%s"%(target), + [ SubTest("TailQ", default_autotest, "tailq_autotest"), + SubTest("Errno", default_autotest, "errno_autotest"), + SubTest("Multiprocess", default_autotest, "multiprocess_autotest") + ]) + +autotest.start() +autotest.gen_report() + +quit(child) +child.terminate() +sys.exit(0) diff --git a/app/test/commands.c b/app/test/commands.c new file mode 100644 index 0000000000..a1d23d8af7 --- /dev/null +++ b/app/test/commands.c @@ -0,0 +1,391 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#ifndef __linux__ +#include +#endif +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "test.h" + +/****************/ + +struct cmd_autotest_result { + cmdline_fixed_string_t autotest; +}; + +static void cmd_autotest_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_autotest_result *res = parsed_result; + int ret = 0; + int all = 0; + + if (!strcmp(res->autotest, "all_autotests")) + all = 1; + + if (all || !strcmp(res->autotest, "version_autotest")) + ret |= test_version(); + if (all || !strcmp(res->autotest, "debug_autotest")) + ret |= test_debug(); + if (all || !strcmp(res->autotest, "pci_autotest")) + ret |= test_pci(); + if (all || !strcmp(res->autotest, "prefetch_autotest")) + ret |= test_prefetch(); + if (all || !strcmp(res->autotest, "byteorder_autotest")) + ret |= test_byteorder(); + if (all || !strcmp(res->autotest, "per_lcore_autotest")) + ret |= test_per_lcore(); + if (all || !strcmp(res->autotest, "atomic_autotest")) + ret |= test_atomic(); + if (all || !strcmp(res->autotest, "malloc_autotest")) + ret |= test_malloc(); + if (all || !strcmp(res->autotest, "spinlock_autotest")) + ret |= test_spinlock(); + if (all || !strcmp(res->autotest, "memory_autotest")) + ret |= test_memory(); + if (all || !strcmp(res->autotest, "memzone_autotest")) + ret |= test_memzone(); + if (all || !strcmp(res->autotest, "rwlock_autotest")) + ret |= test_rwlock(); + if (all || !strcmp(res->autotest, "mbuf_autotest")) + ret |= test_mbuf(); + if (all || !strcmp(res->autotest, "logs_autotest")) + ret |= test_logs(); + if (all || !strcmp(res->autotest, "errno_autotest")) + ret |= test_errno(); + if 
(all || !strcmp(res->autotest, "hash_autotest")) + ret |= test_hash(); + if (all || !strcmp(res->autotest, "lpm_autotest")) + ret |= test_lpm(); + if (all || !strcmp(res->autotest, "cpuflags_autotest")) + ret |= test_cpuflags(); + /* tailq autotest must go after all lpm and hashs tests or any other + * tests which need to create tailq objects (ring and mempool are implicitly + * created in earlier tests so can go later) + */ + if (all || !strcmp(res->autotest, "tailq_autotest")) + ret |= test_tailq(); + if (all || !strcmp(res->autotest, "multiprocess_autotest")) + ret |= test_mp_secondary(); + if (all || !strcmp(res->autotest, "memcpy_autotest")) + ret |= test_memcpy(); + if (all || !strcmp(res->autotest, "string_autotest")) + ret |= test_string_fns(); + if (all || !strcmp(res->autotest, "eal_flags_autotest")) + ret |= test_eal_flags(); + if (all || !strcmp(res->autotest, "alarm_autotest")) + ret |= test_alarm(); + if (all || !strcmp(res->autotest, "interrupt_autotest")) + ret |= test_interrupt(); + if (all || !strcmp(res->autotest, "cycles_autotest")) + ret |= test_cycles(); + if (all || !strcmp(res->autotest, "ring_autotest")) + ret |= test_ring(); + if (all || !strcmp(res->autotest, "timer_autotest")) + ret |= test_timer(); + if (all || !strcmp(res->autotest, "mempool_autotest")) + ret |= test_mempool(); + + if (ret == 0) + printf("Test OK\n"); + else + printf("Test Failed\n"); + fflush(stdout); +} + +cmdline_parse_token_string_t cmd_autotest_autotest = + TOKEN_STRING_INITIALIZER(struct cmd_autotest_result, autotest, + "pci_autotest#memory_autotest#" + "per_lcore_autotest#spinlock_autotest#" + "rwlock_autotest#atomic_autotest#" + "byteorder_autotest#prefetch_autotest#" + "cycles_autotest#logs_autotest#" + "memzone_autotest#ring_autotest#" + "mempool_autotest#mbuf_autotest#" + "timer_autotest#malloc_autotest#" + "memcpy_autotest#hash_autotest#" + "lpm_autotest#debug_autotest#" + "errno_autotest#tailq_autotest#" + "string_autotest#multiprocess_autotest#" + "cpuflags_autotest#eal_flags_autotest#" + "alarm_autotest#interrupt_autotest#" + "version_autotest#" + "all_autotests"); + +cmdline_parse_inst_t cmd_autotest = { + .f = cmd_autotest_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "launch autotest", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_autotest_autotest, + NULL, + }, +}; + +/****************/ + +struct cmd_dump_result { + cmdline_fixed_string_t dump; +}; + +static void +dump_struct_sizes(void) +{ +#define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t)); + DUMP_SIZE(struct rte_mbuf); + DUMP_SIZE(struct rte_pktmbuf); + DUMP_SIZE(struct rte_ctrlmbuf); + DUMP_SIZE(struct rte_mempool); + DUMP_SIZE(struct rte_ring); +#undef DUMP_SIZE +} + +static void cmd_dump_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_dump_result *res = parsed_result; + + if (!strcmp(res->dump, "dump_physmem")) + rte_dump_physmem_layout(); + else if (!strcmp(res->dump, "dump_memzone")) + rte_memzone_dump(); + else if (!strcmp(res->dump, "dump_log_history")) + rte_log_dump_history(); + else if (!strcmp(res->dump, "dump_struct_sizes")) + dump_struct_sizes(); + else if (!strcmp(res->dump, "dump_ring")) + rte_ring_list_dump(); + else if (!strcmp(res->dump, "dump_mempool")) + rte_mempool_list_dump(); +} + +cmdline_parse_token_string_t cmd_dump_dump = + TOKEN_STRING_INITIALIZER(struct cmd_dump_result, dump, + "dump_physmem#dump_memzone#dump_log_history#" + 
"dump_struct_sizes#dump_ring#dump_mempool"); + +cmdline_parse_inst_t cmd_dump = { + .f = cmd_dump_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "dump status", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_dump_dump, + NULL, + }, +}; + +/****************/ + +struct cmd_dump_one_result { + cmdline_fixed_string_t dump; + cmdline_fixed_string_t name; +}; + +static void cmd_dump_one_parsed(void *parsed_result, struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_dump_one_result *res = parsed_result; + + if (!strcmp(res->dump, "dump_ring")) { + struct rte_ring *r; + r = rte_ring_lookup(res->name); + if (r == NULL) { + cmdline_printf(cl, "Cannot find ring\n"); + return; + } + rte_ring_dump(r); + } + else if (!strcmp(res->dump, "dump_mempool")) { + struct rte_mempool *mp; + mp = rte_mempool_lookup(res->name); + if (mp == NULL) { + cmdline_printf(cl, "Cannot find mempool\n"); + return; + } + rte_mempool_dump(mp); + } +} + +cmdline_parse_token_string_t cmd_dump_one_dump = + TOKEN_STRING_INITIALIZER(struct cmd_dump_one_result, dump, + "dump_ring#dump_mempool"); + +cmdline_parse_token_string_t cmd_dump_one_name = + TOKEN_STRING_INITIALIZER(struct cmd_dump_one_result, name, NULL); + +cmdline_parse_inst_t cmd_dump_one = { + .f = cmd_dump_one_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "dump one ring/mempool: dump_ring|dump_mempool ", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_dump_one_dump, + (void *)&cmd_dump_one_name, + NULL, + }, +}; + +/****************/ + +struct cmd_set_ring_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t name; + uint32_t value; +}; + +static void cmd_set_ring_parsed(void *parsed_result, struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_set_ring_result *res = parsed_result; + struct rte_ring *r; + int ret; + + r = rte_ring_lookup(res->name); + if (r == NULL) { + cmdline_printf(cl, "Cannot find ring\n"); + return; + } + + if (!strcmp(res->set, "set_quota")) { + ret = rte_ring_set_bulk_count(r, res->value); + if (ret != 0) + cmdline_printf(cl, "Cannot set quota\n"); + } + else if (!strcmp(res->set, "set_watermark")) { + ret = rte_ring_set_water_mark(r, res->value); + if (ret != 0) + cmdline_printf(cl, "Cannot set water mark\n"); + } +} + +cmdline_parse_token_string_t cmd_set_ring_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, set, + "set_quota#set_watermark"); + +cmdline_parse_token_string_t cmd_set_ring_name = + TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, name, NULL); + +cmdline_parse_token_num_t cmd_set_ring_value = + TOKEN_NUM_INITIALIZER(struct cmd_set_ring_result, value, UINT32); + +cmdline_parse_inst_t cmd_set_ring = { + .f = cmd_set_ring_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "set quota/watermark: " + "set_quota|set_watermark ", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_set_ring_set, + (void *)&cmd_set_ring_name, + (void *)&cmd_set_ring_value, + NULL, + }, +}; + +/****************/ + +struct cmd_quit_result { + cmdline_fixed_string_t quit; +}; + +static void +cmd_quit_parsed(__attribute__((unused)) void *parsed_result, + struct cmdline *cl, + __attribute__((unused)) void *data) +{ + cmdline_quit(cl); +} + +cmdline_parse_token_string_t cmd_quit_quit = + TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, + "quit"); + +cmdline_parse_inst_t cmd_quit = { + .f = cmd_quit_parsed, /* function to call */ + .data = NULL, /* 
2nd arg of func */ + .help_str = "exit application", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_quit_quit, + NULL, + }, +}; + +/****************/ + +cmdline_parse_ctx_t main_ctx[] = { + (cmdline_parse_inst_t *)&cmd_autotest, + (cmdline_parse_inst_t *)&cmd_dump, + (cmdline_parse_inst_t *)&cmd_dump_one, + (cmdline_parse_inst_t *)&cmd_set_ring, + (cmdline_parse_inst_t *)&cmd_quit, + NULL, +}; + diff --git a/app/test/graph_mempool.py b/app/test/graph_mempool.py new file mode 100755 index 0000000000..46e3e7bfab --- /dev/null +++ b/app/test/graph_mempool.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +import sys, re +import numpy as np +import matplotlib +matplotlib.use('Agg') # we don't want to use X11 +import matplotlib.pyplot as plt +from matplotlib.ticker import FuncFormatter + +INT = "([-+]?[0-9][0-9]*)" + +class MempoolTest: + l = [] + + def __init__(self): + pass + + # sort a test case list + def sort(self, x, y): + for t in [ "cache", "cores", "n_get_bulk", "n_put_bulk", + "n_keep", "rate" ]: + if x[t] > y[t]: + return 1 + if x[t] < y[t]: + return -1 + return 0 + + # add a test case + def add(self, **args): + self.l.append(args) + + # get an ordered list matching parameters + # ex: r.get(enq_core=1, deq_core=1) + def get(self, **args): + retlist = [] + for t in self.l: + add_it = 1 + for a in args: + if args[a] != t[a]: + add_it = 0 + break + if add_it: + retlist.append(t) + retlist.sort(cmp=self.sort) + return retlist + + # return an ordered list of all values for this param or param list + # ex: r.get_value_list("enq_core") + def get_value_list(self, param): + retlist = [] + if type(param) is not list: + param = [param] + for t in self.l: + entry = [] + for p in param: + entry.append(t[p]) + if len(entry) == 1: + entry = entry[0] + else: + entry = tuple(entry) + if not entry in retlist: + retlist.append(entry) + retlist.sort() + return retlist + +# read the file and return a MempoolTest object containing all data +def read_data_from_file(filename): + + mempool_test = MempoolTest() + + # parse the file: it produces a list of dict containing the data for + # each test case (each dict in the list corresponds to a line) + f = open(filename) + while True: + l = f.readline() + + if l == "": + break + + regexp = "mempool_autotest " + regexp += "cache=%s cores=%s "%(INT, INT) + regexp += "n_get_bulk=%s n_put_bulk=%s "%(INT, INT) + regexp += "n_keep=%s rate_persec=%s"%(INT, INT) + m = re.match(regexp, l) + if m == None: + continue + + mempool_test.add(cache = int(m.groups()[0]), + cores = int(m.groups()[1]), + n_get_bulk = int(m.groups()[2]), + n_put_bulk = int(m.groups()[3]), + n_keep = int(m.groups()[4]), + rate = int(m.groups()[5])) + + f.close() + return mempool_test + +def millions(x, pos): + return '%1.1fM' % (x*1e-6) + +# graph one, with specific parameters -> generate a .svg file +def graph_one(str, mempool_test, cache, cores, n_keep): + filename = "mempool_%d_%d_%d.svg"%(cache, cores, n_keep) + + n_get_bulk_list = mempool_test.get_value_list("n_get_bulk") + N_n_get_bulk = len(n_get_bulk_list) + get_names = map(lambda x:"get=%d"%x, n_get_bulk_list) + + n_put_bulk_list = mempool_test.get_value_list("n_put_bulk") + N_n_put_bulk = len(n_put_bulk_list) + put_names = map(lambda x:"put=%d"%x, n_put_bulk_list) + + N = N_n_get_bulk * (N_n_put_bulk + 1) + rates = [] + + colors = [] + for n_get_bulk in mempool_test.get_value_list("n_get_bulk"): + col = 0. + for n_put_bulk in mempool_test.get_value_list("n_put_bulk"): + col += 0.9 / len(mempool_test.get_value_list("n_put_bulk")) + r = mempool_test.get(cache=cache, cores=cores, + n_get_bulk=n_get_bulk, + n_put_bulk=n_put_bulk, n_keep=n_keep) + if len(r) != 0: + r = r[0]["rate"] + rates.append(r) + colors.append((1. 
- col, 0.2, col, 1.)) # rgba + + rates.append(0) + colors.append((0.,0.,0.,0.)) + + ind = np.arange(N) # the x locations for the groups + width = 1 # the width of the bars: can also be len(x) sequence + + + formatter = FuncFormatter(millions) + fig = plt.figure() + p = plt.bar(ind, tuple(rates), width, color=tuple(colors)) + fig.axes[0].yaxis.set_major_formatter(formatter) + + plt.ylabel('Obj/sec') + #plt.ylim(0, 400000000.) + title = "Mempool autotest \"%s\"\n"%(str) + title += "cache=%d, core(s)=%d, n_keep=%d"%(cache, cores, n_keep) + plt.title(title) + ind_names = np.arange(N_n_get_bulk) * (N_n_put_bulk+1) + (N_n_put_bulk+1) / 2 + plt.xticks(ind_names, tuple(get_names)) + plt.legend(tuple([p[i] for i in range(N_n_put_bulk)]), tuple(put_names), + loc="upper left") + plt.savefig(filename) + +if len(sys.argv) != 3: + print "usage: graph_mempool.py file title" + sys.exit(1) + +mempool_test = read_data_from_file(sys.argv[1]) + +for cache, cores, n_keep in mempool_test.get_value_list(["cache", "cores", + "n_keep"]): + graph_one(sys.argv[2], mempool_test, cache, cores, n_keep) diff --git a/app/test/graph_ring.py b/app/test/graph_ring.py new file mode 100755 index 0000000000..02c4228016 --- /dev/null +++ b/app/test/graph_ring.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +import sys, re +import numpy as np +import matplotlib +matplotlib.use('Agg') # we don't want to use X11 +import matplotlib.pyplot as plt +from matplotlib.ticker import FuncFormatter + +INT = "([-+]?[0-9][0-9]*)" + +class RingTest: + l = [] + + def __init__(self): + pass + + # sort a test case list + def sort(self, x, y): + for t in [ "enq_core", "deq_core", "enq_bulk", "deq_bulk", "rate" ]: + if x[t] > y[t]: + return 1 + if x[t] < y[t]: + return -1 + return 0 + + # add a test case + def add(self, **args): + self.l.append(args) + + # get an ordered list matching parameters + # ex: r.get(enq_core=1, deq_core=1) + def get(self, **args): + retlist = [] + for t in self.l: + add_it = 1 + for a in args: + if args[a] != t[a]: + add_it = 0 + break + if add_it: + retlist.append(t) + retlist.sort(cmp=self.sort) + return retlist + + # return an ordered list of all values for this param or param list + # ex: r.get_value_list("enq_core") + def get_value_list(self, param): + retlist = [] + if type(param) is not list: + param = [param] + for t in self.l: + entry = [] + for p in param: + entry.append(t[p]) + if len(entry) == 1: + entry = entry[0] + else: + entry = tuple(entry) + if not entry in retlist: + retlist.append(entry) + retlist.sort() + return retlist + +# read the file and return a RingTest object containing all data +def read_data_from_file(filename): + + ring_test = RingTest() + + # parse the file: it produces a list of dict containing the data for + # each test case (each dict in the list corresponds to a line) + f = open(filename) + while True: + l = f.readline() + + if l == "": + break + + regexp = "ring_autotest " + regexp += "e/d_core=%s,%s e/d_bulk=%s,%s "%(INT, INT, INT, INT) + regexp += "sp=%s sc=%s "%(INT, INT) + regexp += "rate_persec=%s"%(INT) + m = re.match(regexp, l) + if m == None: + continue + + ring_test.add(enq_core = int(m.groups()[0]), + deq_core = int(m.groups()[1]), + enq_bulk = int(m.groups()[2]), + deq_bulk = int(m.groups()[3]), + sp = int(m.groups()[4]), + sc = int(m.groups()[5]), + rate = int(m.groups()[6])) + + f.close() + return ring_test + +def millions(x, pos): + return '%1.1fM' % (x*1e-6) + +# graph one, with specific parameters -> generate a .svg file +def graph_one(str, ring_test, enq_core, deq_core, sp, sc): + filename = "ring_%d_%d"%(enq_core, deq_core) + if sp: + sp_str = "sp" + else: + sp_str = "mp" + if sc: + sc_str = "sc" + else: + sc_str = "mc" + filename += "_%s_%s.svg"%(sp_str, sc_str) + + + enq_bulk_list = ring_test.get_value_list("enq_bulk") + N_enq_bulk = len(enq_bulk_list) + enq_names = map(lambda x:"enq=%d"%x, enq_bulk_list) + + deq_bulk_list = ring_test.get_value_list("deq_bulk") + N_deq_bulk = len(deq_bulk_list) + deq_names = map(lambda x:"deq=%d"%x, deq_bulk_list) + + N = N_enq_bulk * (N_deq_bulk + 1) + rates = [] + + colors = [] + for enq_bulk in ring_test.get_value_list("enq_bulk"): + col = 0. + for deq_bulk in ring_test.get_value_list("deq_bulk"): + col += 0.9 / len(ring_test.get_value_list("deq_bulk")) + r = ring_test.get(enq_core=enq_core, deq_core=deq_core, + enq_bulk=enq_bulk, deq_bulk=deq_bulk, + sp=sp, sc=sc) + r = r[0]["rate"] + rates.append(r) + colors.append((1. 
- col, 0.2, col, 1.)) # rgba + + rates.append(0) + colors.append((0.,0.,0.,0.)) + + ind = np.arange(N) # the x locations for the groups + width = 1 # the width of the bars: can also be len(x) sequence + + + formatter = FuncFormatter(millions) + fig = plt.figure() + p = plt.bar(ind, tuple(rates), width, color=tuple(colors)) + fig.axes[0].yaxis.set_major_formatter(formatter) + + plt.ylabel('Obj/sec') + #plt.ylim(0, 400000000.) + plt.title("Ring autotest \"%s\"\nenq core(s)=%d, deq core(s)=%d, %s, %s"\ + %(str, enq_core, deq_core, sp_str, sc_str)) + ind_names = np.arange(N_enq_bulk) * (N_deq_bulk+1) + (N_deq_bulk+1) / 2 + plt.xticks(ind_names, tuple(enq_names)) + plt.legend(tuple([p[i] for i in range(N_deq_bulk)]), tuple(deq_names), + loc="upper left") + plt.savefig(filename) + +if len(sys.argv) != 3: + print "usage: graph_ring.py file title" + sys.exit(1) + +ring_test = read_data_from_file(sys.argv[1]) + +for enq_core, deq_core, sp, sc in \ + ring_test.get_value_list(["enq_core", "deq_core", "sp", "sc"]): + graph_one(sys.argv[2], ring_test, enq_core, deq_core, sp, sc) diff --git a/app/test/process.h b/app/test/process.h new file mode 100644 index 0000000000..0dbc89814a --- /dev/null +++ b/app/test/process.h @@ -0,0 +1,89 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _PROCESS_H_ +#define _PROCESS_H_ + +#ifndef RTE_EXEC_ENV_BAREMETAL + +/* + * launches a second copy of the test process using the given argv parameters, + * which should include argv[0] as the process name. 
To identify in the + * subprocess the source of the call, the env_value parameter is set in the + * environment as $RTE_TEST + */ +static inline int +process_dup(const char *const argv[], int numargs, const char *env_value) +{ + char *argv_cpy[numargs + 1]; + int i, fd, status; + char path[32]; + + pid_t pid = fork(); + if (pid < 0) + return -1; + else if (pid == 0) { + /* make a copy of the arguments to be passed to exec */ + for (i = 0; i < numargs; i++) + argv_cpy[i] = strdup(argv[i]); + argv_cpy[i] = NULL; + + /* close all open file descriptors, check /proc/self/fd to only + * call close on open fds. Exclude fds 0, 1 and 2*/ + for (fd = getdtablesize(); fd > 2; fd-- ) { + rte_snprintf(path, sizeof(path), "/proc/self/fd/%d", fd); + if (access(path, F_OK) == 0) + close(fd); + } + printf("Running binary with argv[]:"); + for (i = 0; i < numargs; i++) + printf("'%s' ", argv_cpy[i]); + printf("\n"); + + /* set the environment variable */ + if (setenv(RECURSIVE_ENV_VAR, env_value, 1) != 0) + rte_panic("Cannot export environment variable\n"); + if (execv("/proc/self/exe", argv_cpy) < 0) + rte_panic("Cannot exec\n"); + } + /* parent process does a wait */ + while (wait(&status) != pid) + ; + return status; +} + +#endif /* not baremetal */ + +#endif /* _PROCESS_H_ */ diff --git a/app/test/test.c b/app/test/test.c new file mode 100644 index 0000000000..f98656c47b --- /dev/null +++ b/app/test/test.c @@ -0,0 +1,153 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "test.h" + +const char *prgname; /* to be set to argv[0] */ + +#ifndef RTE_EXEC_ENV_BAREMETAL +static const char *recursive_call; /* used in linuxapp for MP and other tests */ + +static int +no_action(void){ return 0; } + +static int +do_recursive_call(void) +{ + unsigned i; + struct { + const char *env_var; + int (*action_fn)(void); + } actions[] = { + { "run_secondary_instances", test_mp_secondary }, + { "test_missing_c_flag", no_action }, + { "test_missing_n_flag", no_action }, + { "test_no_hpet_flag", no_action }, + { "test_invalid_b_flag", no_action }, + { "test_invalid_r_flag", no_action }, + { "test_misc_flags", no_action }, + }; + + if (recursive_call == NULL) + return -1; + for (i = 0; i < sizeof(actions)/sizeof(actions[0]); i++) { + if (strcmp(actions[i].env_var, recursive_call) == 0) + return (actions[i].action_fn)(); + } + return -1; +} +#endif + +void +test_hexdump(const char *title, const void *buf, unsigned int len) +{ + unsigned int i, out, ofs; + const unsigned char *data = buf; +#define LINE_LEN 80 + char line[LINE_LEN]; /* space needed 8+16*3+3+16 == 75 */ + + printf("%s at [%p], len=%u\n", title, data, len); + ofs = 0; + while (ofs < len) { + /* format 1 line in the buffer, then use printf to print them */ + out = rte_snprintf(line, LINE_LEN, "%08X", ofs); + for (i = 0; ofs+i < len && i < 16; i++) + out += rte_snprintf(line+out, LINE_LEN - out, " %02X", + data[ofs+i]&0xff); + for(; i <= 16; i++) + out += rte_snprintf(line+out, LINE_LEN - out, " "); + for(i = 0; ofs < len && i < 16; i++, ofs++) { + unsigned char c = data[ofs]; + if (!isascii(c) || !isprint(c)) + c = '.'; + out += rte_snprintf(line+out, LINE_LEN - out, "%c", c); + } + printf("%s\n", line); + } +} + +int +main(int argc, char **argv) +{ + struct cmdline *cl; + int ret; + + ret = rte_eal_init(argc, argv); + if (ret < 0) + return -1; + + rte_timer_subsystem_init(); + + argc -= ret; + argv += ret; + + prgname = argv[0]; + +#ifndef RTE_EXEC_ENV_BAREMETAL + if ((recursive_call = getenv(RECURSIVE_ENV_VAR)) != NULL) + return do_recursive_call(); +#endif + + cl = cmdline_stdin_new(main_ctx, "RTE>>"); + if (cl == NULL) { + return -1; + } + cmdline_interact(cl); + cmdline_stdin_exit(cl); + + return 0; +} diff --git a/app/test/test.h b/app/test/test.h new file mode 100644 index 0000000000..3c927d2830 --- /dev/null +++ b/app/test/test.h @@ -0,0 +1,85 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _TEST_H_ +#define _TEST_H_ + +/* icc on baremetal gives us troubles with function named 'main' */ +#ifdef RTE_EXEC_ENV_BAREMETAL +#define main _main +#endif + +#define RECURSIVE_ENV_VAR "RTE_TEST_RECURSIVE" + +extern const char *prgname; + +extern cmdline_parse_ctx_t main_ctx[]; + +void test_hexdump(const char *title, const void *buf, unsigned int len); + +int main(int argc, char **argv); + +int test_pci(void); +int test_memory(void); +int test_per_lcore(void); +int test_spinlock(void); +int test_rwlock(void); +int test_atomic(void); +int test_byteorder(void); +int test_prefetch(void); +int test_cycles(void); +int test_logs(void); +int test_memzone(void); +int test_ring(void); +int test_mempool(void); +int test_mbuf(void); +int test_timer(void); +int test_malloc(void); +int test_memcpy(void); +int test_hash(void); +int test_lpm(void); +int test_debug(void); +int test_errno(void); +int test_tailq(void); +int test_string_fns(void); +int test_mp_secondary(void); +int test_cpuflags(void); +int test_eal_flags(void); +int test_alarm(void); +int test_interrupt(void); +int test_version(void); +int test_pci_run; + +#endif diff --git a/app/test/test_alarm.c b/app/test/test_alarm.c new file mode 100644 index 0000000000..5e36a3ddcf --- /dev/null +++ b/app/test/test_alarm.c @@ -0,0 +1,258 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include "test.h" + +#define US_PER_MS 1000 + +#define RTE_TEST_ALARM_TIMEOUT 3000 /* ms */ +#define RTE_TEST_CHECK_PERIOD 1000 /* ms */ + +static volatile int flag; + +static void +test_alarm_callback(void *cb_arg) +{ + flag = 1; + printf("Callback setting flag - OK. [cb_arg = %p]\n", cb_arg); +} + +static rte_atomic32_t cb_count; + +static void +test_multi_cb(void *arg) +{ + rte_atomic32_inc(&cb_count); + printf("In %s - arg = %p\n", __func__, arg); +} + +static volatile int recursive_error = 0; + +static void +test_remove_in_callback(void *arg) +{ + printf("In %s - arg = %p\n", __func__, arg); + if (rte_eal_alarm_cancel(test_remove_in_callback, arg) || + rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1)) { + printf("Error - cancelling callback from within function succeeded!\n"); + recursive_error = 1; + } + flag = (int)((uintptr_t)arg); +} + +static volatile int flag_2; + +static void +test_remove_in_callback_2(void *arg) +{ + if (rte_eal_alarm_cancel(test_remove_in_callback_2, arg) || rte_eal_alarm_cancel(test_remove_in_callback_2, (void *)-1)) { + printf("Error - cancelling callback of test_remove_in_callback_2\n"); + return; + } + flag_2 = 1; +} + +static int +test_multi_alarms(void) +{ + int rm_count = 0; + cb_count.cnt = 0; + + printf("Expect 6 callbacks in order...\n"); + /* add two alarms in order */ + rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1); + rte_eal_alarm_set(2000 * US_PER_MS, test_multi_cb, (void *)2); + + /* now add in reverse order */ + rte_eal_alarm_set(6000 * US_PER_MS, test_multi_cb, (void *)6); + rte_eal_alarm_set(5000 * US_PER_MS, test_multi_cb, (void *)5); + rte_eal_alarm_set(4000 * US_PER_MS, test_multi_cb, (void *)4); + rte_eal_alarm_set(3000 * US_PER_MS, test_multi_cb, (void *)3); + + /* wait for expiry */ + rte_delay_ms(6500); + if (cb_count.cnt != 6) { + printf("Missing callbacks\n"); + /* remove any callbacks that might remain */ + rte_eal_alarm_cancel(test_multi_cb, (void *)-1); + return -1; + } + + cb_count.cnt = 0; + printf("Expect only callbacks with args 1 and 3...\n"); + /* Add 3 flags, then delete one */ + rte_eal_alarm_set(3000 * US_PER_MS, test_multi_cb, (void *)3); + rte_eal_alarm_set(2000 * US_PER_MS, test_multi_cb, (void *)2); + rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1); + rm_count = rte_eal_alarm_cancel(test_multi_cb, (void *)2); + + rte_delay_ms(3500); + if (cb_count.cnt != 2 || rm_count != 1) { + printf("Error: invalid flags count or alarm removal failure" + " - flags value = %d, expected = %d\n", cb_count.cnt, 2); + /* remove any callbacks that might remain */ + rte_eal_alarm_cancel(test_multi_cb, (void *)-1); + return -1; + } + + printf("Testing adding and then removing multiple alarms\n"); + /* finally test that no callbacks are called if we delete them all*/ + rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1); + 
rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)2); + rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)3); + rm_count = rte_eal_alarm_cancel(test_alarm_callback, (void *)-1); + if (rm_count != 0) { + printf("Error removing non-existant alarm succeeded\n"); + rte_eal_alarm_cancel(test_multi_cb, (void *) -1); + return -1; + } + rm_count = rte_eal_alarm_cancel(test_multi_cb, (void *) -1); + if (rm_count != 3) { + printf("Error removing all pending alarm callbacks\n"); + return -1; + } + + /* Test that we cannot cancel an alarm from within the callback itself + * Also test that we can cancel head-of-line callbacks ok.*/ + flag = 0; + recursive_error = 0; + rte_eal_alarm_set(1000 * US_PER_MS, test_remove_in_callback, (void *)1); + rte_eal_alarm_set(2000 * US_PER_MS, test_remove_in_callback, (void *)2); + rm_count = rte_eal_alarm_cancel(test_remove_in_callback, (void *)1); + if (rm_count != 1) { + printf("Error cancelling head-of-list callback\n"); + return -1; + } + rte_delay_ms(1500); + if (flag != 0) { + printf("Error, cancelling head-of-list leads to premature callback\n"); + return -1; + } + rte_delay_ms(1000); + if (flag != 2) { + printf("Error - expected callback not called\n"); + rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1); + return -1; + } + if (recursive_error == 1) + return -1; + + /* Check if it can cancel all for the same callback */ + printf("Testing canceling all for the same callback\n"); + flag_2 = 0; + rte_eal_alarm_set(1000 * US_PER_MS, test_remove_in_callback, (void *)1); + rte_eal_alarm_set(2000 * US_PER_MS, test_remove_in_callback_2, (void *)2); + rte_eal_alarm_set(3000 * US_PER_MS, test_remove_in_callback_2, (void *)3); + rte_eal_alarm_set(4000 * US_PER_MS, test_remove_in_callback, (void *)4); + rm_count = rte_eal_alarm_cancel(test_remove_in_callback_2, (void *)-1); + if (rm_count != 2) { + printf("Error, cannot cancel all for the same callback\n"); + return -1; + } + rm_count = rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1); + if (rm_count != 2) { + printf("Error, cannot cancel all for the same callback\n"); + return -1; + } + + return 0; +} + +int +test_alarm(void) +{ + int count = 0; + + /* check if the callback will be called */ + printf("check if the callback will be called\n"); + flag = 0; + if (rte_eal_alarm_set(RTE_TEST_ALARM_TIMEOUT * US_PER_MS, + test_alarm_callback, NULL) < 0) { + printf("fail to set alarm callback\n"); + return -1; + } + while (flag == 0 && count ++ < 6) + rte_delay_ms(RTE_TEST_CHECK_PERIOD); + + if (flag == 0){ + printf("Callback not called\n"); + return -1; + } + + /* check if it will fail to set alarm with wrong us value */ + printf("check if it will fail to set alarm with wrong ms values\n"); + if (rte_eal_alarm_set(0, test_alarm_callback, + NULL) >= 0) { + printf("should not be successful with 0 us value\n"); + return -1; + } + if (rte_eal_alarm_set(UINT64_MAX - 1, test_alarm_callback, + NULL) >= 0) { + printf("should not be successful with (UINT64_MAX-1) us value\n"); + return -1; + } + + /* check if it will fail to set alarm with null callback parameter */ + printf("check if it will fail to set alarm with null callback parameter\n"); + if (rte_eal_alarm_set(RTE_TEST_ALARM_TIMEOUT, NULL, NULL) >= 0) { + printf("should not be successful to set alarm with null callback parameter\n"); + return -1; + } + + /* check if it will fail to remove alarm with null callback parameter */ + printf("check if it will fail to remove alarm with null callback parameter\n"); + if (rte_eal_alarm_cancel(NULL, NULL) 
== 0) { + printf("should not be successful to remove alarm with null callback parameter"); + return -1; + } + + if (test_multi_alarms() != 0) + return -1; + + return 0; +} + diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c new file mode 100644 index 0000000000..b64f361744 --- /dev/null +++ b/app/test/test_atomic.c @@ -0,0 +1,381 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" + +/* + * Atomic Variables + * ================ + * + * - The main test function performs three subtests. The first test + * checks that the usual inc/dec/add/sub functions are working + * correctly: + * + * - Initialize 16-bit, 32-bit and 64-bit atomic variables to specific + * values. + * + * - These variables are incremented and decremented on each core at + * the same time in ``test_atomic_usual()``. + * + * - The function checks that once all lcores finish their function, + * the values of the atomic variables are still the same. + * + * - The second test verifies the behavior of "test and set" functions. + * + * - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero. + * + * - Invoke ``test_atomic_tas()`` on each lcore. Before doing anything + * else, the cores wait for a synchro using ``while + * (rte_atomic32_read(&val) == 0)``, which is triggered by the main test + * function. Then all cores do a + * ``rte_atomicXX_test_and_set()`` at the same time. If it is successful, + * it increments another atomic counter. + * + * - The main function checks that the atomic counter was incremented + * exactly three times (once each for the 16-bit, 32-bit and 64-bit values). + * + * - Test "add/sub and return" + * + * - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
+ * + * - Invoke ``test_atomic_addsub_return()`` on each lcore. Before doing + * anything else, the cores are waiting a synchro. Each lcore does + * this operation several times:: + * + * tmp = rte_atomicXX_add_return(&a, 1); + * atomic_add(&count, tmp); + * tmp = rte_atomicXX_sub_return(&a, 1); + * atomic_sub(&count, tmp+1); + * + * - At the end of the test, the *count* value must be 0. + */ + +#define NUM_ATOMIC_TYPES 3 + +#define N 10000 + +static rte_atomic16_t a16; +static rte_atomic32_t a32; +static rte_atomic64_t a64; +static rte_atomic32_t count; +static rte_atomic32_t synchro; + +static int +test_atomic_usual(__attribute__((unused)) void *arg) +{ + unsigned i; + + while (rte_atomic32_read(&synchro) == 0) + ; + + for (i = 0; i < N; i++) + rte_atomic16_inc(&a16); + for (i = 0; i < N; i++) + rte_atomic16_dec(&a16); + for (i = 0; i < (N / 5); i++) + rte_atomic16_add(&a16, 5); + for (i = 0; i < (N / 5); i++) + rte_atomic16_sub(&a16, 5); + + for (i = 0; i < N; i++) + rte_atomic32_inc(&a32); + for (i = 0; i < N; i++) + rte_atomic32_dec(&a32); + for (i = 0; i < (N / 5); i++) + rte_atomic32_add(&a32, 5); + for (i = 0; i < (N / 5); i++) + rte_atomic32_sub(&a32, 5); + + for (i = 0; i < N; i++) + rte_atomic64_inc(&a64); + for (i = 0; i < N; i++) + rte_atomic64_dec(&a64); + for (i = 0; i < (N / 5); i++) + rte_atomic64_add(&a64, 5); + for (i = 0; i < (N / 5); i++) + rte_atomic64_sub(&a64, 5); + + return 0; +} + +static int +test_atomic_tas(__attribute__((unused)) void *arg) +{ + while (rte_atomic32_read(&synchro) == 0) + ; + + if (rte_atomic16_test_and_set(&a16)) + rte_atomic32_inc(&count); + if (rte_atomic32_test_and_set(&a32)) + rte_atomic32_inc(&count); + if (rte_atomic64_test_and_set(&a64)) + rte_atomic32_inc(&count); + + return 0; +} + +static int +test_atomic_addsub_and_return(__attribute__((unused)) void *arg) +{ + uint32_t tmp16; + uint32_t tmp32; + uint64_t tmp64; + unsigned i; + + while (rte_atomic32_read(&synchro) == 0) + ; + + for (i = 0; i < N; i++) { + tmp16 = rte_atomic16_add_return(&a16, 1); + rte_atomic32_add(&count, tmp16); + + tmp16 = rte_atomic16_sub_return(&a16, 1); + rte_atomic32_sub(&count, tmp16+1); + + tmp32 = rte_atomic32_add_return(&a32, 1); + rte_atomic32_add(&count, tmp32); + + tmp32 = rte_atomic32_sub_return(&a32, 1); + rte_atomic32_sub(&count, tmp32+1); + + tmp64 = rte_atomic64_add_return(&a64, 1); + rte_atomic32_add(&count, tmp64); + + tmp64 = rte_atomic64_sub_return(&a64, 1); + rte_atomic32_sub(&count, tmp64+1); + } + + return 0; +} + +/* + * rte_atomic32_inc_and_test() would increase a 32 bits counter by one and then + * test if that counter is equal to 0. It would return true if the counter is 0 + * and false if the counter is not 0. rte_atomic64_inc_and_test() could do the + * same thing but for a 64 bits counter. + * Here checks that if the 32/64 bits counter is equal to 0 after being atomically + * increased by one. If it is, increase the variable of "count" by one which would + * be checked as the result later. + * + */ +static int +test_atomic_inc_and_test(__attribute__((unused)) void *arg) +{ + while (rte_atomic32_read(&synchro) == 0) + ; + + if (rte_atomic16_inc_and_test(&a16)) { + rte_atomic32_inc(&count); + } + if (rte_atomic32_inc_and_test(&a32)) { + rte_atomic32_inc(&count); + } + if (rte_atomic64_inc_and_test(&a64)) { + rte_atomic32_inc(&count); + } + + return 0; +} + +/* + * rte_atomicXX_dec_and_test() should decrease a 32 bits counter by one and then + * test if that counter is equal to 0. 
It should return true if the counter is 0 + * and false if the counter is not 0. + * This test checks if the counter is equal to 0 after being atomically + * decreased by one. If it is, increase the value of "count" by one which is to + * be checked as the result later. + */ +static int +test_atomic_dec_and_test(__attribute__((unused)) void *arg) +{ + while (rte_atomic32_read(&synchro) == 0) + ; + + if (rte_atomic16_dec_and_test(&a16)) + rte_atomic32_inc(&count); + + if (rte_atomic32_dec_and_test(&a32)) + rte_atomic32_inc(&count); + + if (rte_atomic64_dec_and_test(&a64)) + rte_atomic32_inc(&count); + + return 0; +} + +int +test_atomic(void) +{ + rte_atomic16_init(&a16); + rte_atomic32_init(&a32); + rte_atomic64_init(&a64); + rte_atomic32_init(&count); + rte_atomic32_init(&synchro); + + rte_atomic16_set(&a16, 1UL << 10); + rte_atomic32_set(&a32, 1UL << 10); + rte_atomic64_set(&a64, 1ULL << 33); + + printf("usual inc/dec/add/sub functions\n"); + + rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MASTER); + rte_atomic32_set(&synchro, 1); + rte_eal_mp_wait_lcore(); + rte_atomic32_set(&synchro, 0); + + if (rte_atomic16_read(&a16) != 1UL << 10) { + printf("Atomic16 usual functions failed\n"); + return -1; + } + + if (rte_atomic32_read(&a32) != 1UL << 10) { + printf("Atomic32 usual functions failed\n"); + return -1; + } + + if (rte_atomic64_read(&a64) != 1ULL << 33) { + printf("Atomic64 usual functions failed\n"); + return -1; + } + + printf("test and set\n"); + + rte_atomic64_set(&a64, 0); + rte_atomic32_set(&a32, 0); + rte_atomic16_set(&a16, 0); + rte_atomic32_set(&count, 0); + rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER); + rte_atomic32_set(&synchro, 1); + rte_eal_mp_wait_lcore(); + rte_atomic32_set(&synchro, 0); + + if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) { + printf("Atomic test and set failed\n"); + return -1; + } + + printf("add/sub and return\n"); + + rte_atomic64_set(&a64, 0); + rte_atomic32_set(&a32, 0); + rte_atomic16_set(&a16, 0); + rte_atomic32_set(&count, 0); + rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL, + SKIP_MASTER); + rte_atomic32_set(&synchro, 1); + rte_eal_mp_wait_lcore(); + rte_atomic32_set(&synchro, 0); + + if (rte_atomic32_read(&count) != 0) { + printf("Atomic add/sub+return failed\n"); + return -1; + } + + /* + * Set a64, a32 and a16 with the same value of minus "number of slave + * lcores", launch all slave lcores to atomically increase by one and + * test them respectively. + * Each lcore should have only one chance to increase a64 by one and + * then check if it is equal to 0, but there should be only one lcore + * that finds that it is 0. It is similar for a32 and a16. + * Then a variable of "count", initialized to zero, is increased by + * one if a64, a32 or a16 is 0 after being increased and tested + * atomically. + * We can check if "count" is finally equal to 3 to see if all slave + * lcores performed "atomic inc and test" right. 
+ */ + printf("inc and test\n"); + + rte_atomic64_clear(&a64); + rte_atomic32_clear(&a32); + rte_atomic16_clear(&a16); + rte_atomic32_clear(&synchro); + rte_atomic32_clear(&count); + + rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count())); + rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count())); + rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count())); + rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MASTER); + rte_atomic32_set(&synchro, 1); + rte_eal_mp_wait_lcore(); + rte_atomic32_clear(&synchro); + + if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) { + printf("Atomic inc and test failed %d\n", count.cnt); + return -1; + } + + /* + * Same as above, but this time we set the values to "number of slave + * lcores", and decrement instead of increment. + */ + printf("dec and test\n"); + + rte_atomic32_clear(&synchro); + rte_atomic32_clear(&count); + + rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1)); + rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1)); + rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1)); + rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MASTER); + rte_atomic32_set(&synchro, 1); + rte_eal_mp_wait_lcore(); + rte_atomic32_clear(&synchro); + + if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) { + printf("Atomic dec and test failed\n"); + return -1; + } + + return 0; +} + diff --git a/app/test/test_byteorder.c b/app/test/test_byteorder.c new file mode 100644 index 0000000000..593e26f393 --- /dev/null +++ b/app/test/test_byteorder.c @@ -0,0 +1,97 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include + +#include + +#include + +#include "test.h" + +static volatile uint16_t u16 = 0x1337; +static volatile uint32_t u32 = 0xdeadbeefUL; +static volatile uint64_t u64 = 0xdeadcafebabefaceULL; + +/* + * Byteorder functions + * =================== + * + * - check that optimized byte swap functions are working for each + * size (16, 32, 64 bits) + */ + +int +test_byteorder(void) +{ + uint16_t res_u16; + uint32_t res_u32; + uint64_t res_u64; + + res_u16 = rte_bswap16(u16); + printf("%"PRIx16" -> %"PRIx16"\n", u16, res_u16); + if (res_u16 != 0x3713) + return -1; + + res_u32 = rte_bswap32(u32); + printf("%"PRIx32" -> %"PRIx32"\n", u32, res_u32); + if (res_u32 != 0xefbeaddeUL) + return -1; + + res_u64 = rte_bswap64(u64); + printf("%"PRIx64" -> %"PRIx64"\n", u64, res_u64); + if (res_u64 != 0xcefabebafecaaddeULL) + return -1; + + res_u16 = rte_bswap16(0x1337); + printf("const %"PRIx16" -> %"PRIx16"\n", 0x1337, res_u16); + if (res_u16 != 0x3713) + return -1; + + res_u32 = rte_bswap32(0xdeadbeefUL); + printf("const %"PRIx32" -> %"PRIx32"\n", (uint32_t) 0xdeadbeef, res_u32); + if (res_u32 != 0xefbeaddeUL) + return -1; + + res_u64 = rte_bswap64(0xdeadcafebabefaceULL); + printf("const %"PRIx64" -> %"PRIx64"\n", (uint64_t) 0xdeadcafebabefaceULL, res_u64); + if (res_u64 != 0xcefabebafecaaddeULL) + return -1; + + return 0; +} diff --git a/app/test/test_cpuflags.c b/app/test/test_cpuflags.c new file mode 100644 index 0000000000..d15d6e44bb --- /dev/null +++ b/app/test/test_cpuflags.c @@ -0,0 +1,134 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include +#include +#include +#include +#include + +#include "test.h" + + +/* convenience define */ +#define CHECK_FOR_FLAG(x) \ + result = rte_cpu_get_flag_enabled(x); \ + printf("%s\n", cpu_flag_result(result)); \ + if (result == -ENOENT) \ + return -1; + +/* + * Helper function to display result + */ +static inline const char * +cpu_flag_result(int result) +{ + switch (result) { + case 0: + return "NOT PRESENT"; + case 1: + return "OK"; + default: + return "ERROR"; + } +} + + + +/* + * CPUID test + * =========== + * + * - Check flags from different registers with rte_cpu_get_flag_enabled() + * - Check if register and CPUID functions fail properly + */ + +int +test_cpuflags(void) +{ + int result; + printf("\nChecking for flags from different registers...\n"); + + printf("Check for SSE:\t\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_SSE); + + printf("Check for SSE2:\t\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_SSE2); + + printf("Check for SSE3:\t\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_SSE3); + + printf("Check for SSE4.1:\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_SSE4_1); + + printf("Check for SSE4.2:\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_SSE4_2); + + printf("Check for AVX:\t\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_AVX); + + printf("Check for AVX2:\t\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_AVX2); + + printf("Check for TRBOBST:\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_TRBOBST); + + printf("Check for ENERGY_EFF:\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_ENERGY_EFF); + + printf("Check for LAHF_SAHF:\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_LAHF_SAHF); + + printf("Check for 1GB_PG:\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_1GB_PG); + + printf("Check for INVTSC:\t"); + CHECK_FOR_FLAG(RTE_CPUFLAG_INVTSC); + + + + /* + * Check if invalid data is handled properly + */ + printf("\nCheck for invalid flag:\t"); + result = rte_cpu_get_flag_enabled(RTE_CPUFLAG_NUMFLAGS); + printf("%s\n", cpu_flag_result(result)); + if (result != -ENOENT) + return -1; + + return 0; +} diff --git a/app/test/test_cycles.c b/app/test/test_cycles.c new file mode 100644 index 0000000000..f48040204d --- /dev/null +++ b/app/test/test_cycles.c @@ -0,0 +1,94 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * version: DPDK.L.1.2.3-3
+ */
+
+#include
+#include
+
+#include
+
+#include
+#include
+
+#include "test.h"
+
+#define N 10000
+
+/*
+ * Cycles test
+ * ===========
+ *
+ * - Loop N times and check that the timer always increments and
+ *   never decrements during this loop.
+ *
+ * - Wait one second using rte_delay_us() and check that the increment
+ *   of cycles is correct with regard to the frequency of the timer.
+ */
+
+int
+test_cycles(void)
+{
+    unsigned i;
+    uint64_t start_cycles, cycles, prev_cycles;
+    uint64_t hz = rte_get_hpet_hz();
+    uint64_t max_inc = (hz / 100); /* 10 ms max between 2 reads */
+
+    /* check that the timer is always incrementing */
+    start_cycles = rte_get_hpet_cycles();
+    prev_cycles = start_cycles;
+    for (i = 0; i < N; i++) {
+        cycles = rte_get_hpet_cycles();
+        if ((uint64_t)(cycles - prev_cycles) > max_inc) {
+            printf("increment too high or going backwards\n");
+            return -1;
+        }
+        prev_cycles = cycles;
+    }
+
+    /* check that waiting 1 second is precise */
+    prev_cycles = rte_get_hpet_cycles();
+    rte_delay_us(1000000);
+    cycles = rte_get_hpet_cycles();
+    if ((uint64_t)(cycles - prev_cycles) > (hz + max_inc)) {
+        printf("delay_us is not accurate\n");
+        return -1;
+    }
+    cycles = rte_get_hpet_cycles();
+    if ((uint64_t)(cycles - prev_cycles) < (hz)) {
+        printf("delay_us is not accurate\n");
+        return -1;
+    }
+
+    return 0;
+}
diff --git a/app/test/test_debug.c b/app/test/test_debug.c
new file mode 100644
index 0000000000..153c562cbe
--- /dev/null
+++ b/app/test/test_debug.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include + +#include + +#include +#include + +#include "test.h" + +/* + * Debug test + * ========== + * + * - Call rte_dump_stack() and rte_dump_registers(). The result is not checked + * currently, as the functions are not implemented on baremetal. + * - Check that rte_panic() terminates the program using a non-zero error code. + * (Only implemented on linux, since it requires the fork() system call) + */ + +#ifdef RTE_EXEC_ENV_BAREMETAL + +/* baremetal - don't test rte_panic or rte_exit */ +static int +test_panic(void) +{ + return 0; +} + +static int +test_exit(void) +{ + return 0; +} + +#else + +/* linuxapp - use fork() to test rte_panic() */ +static int +test_panic(void) +{ + int pid; + int status; + + pid = fork(); + + if (pid == 0) + rte_panic("Test Debug\n"); + else if (pid < 0){ + printf("Fork Failed\n"); + return -1; + } + wait(&status); + if(status == 0){ + printf("Child process terminated normally!\n"); + return -1; + } else + printf("Child process terminated as expected - Test passed!\n"); + + return 0; +} + +/* linuxapp - use fork() to test rte_exit() */ +static int +test_exit_val(int exit_val) +{ + int pid; + int status; + + pid = fork(); + + if (pid == 0) + rte_exit(exit_val, __func__); + else if (pid < 0){ + printf("Fork Failed\n"); + return -1; + } + wait(&status); + printf("Child process status: %d\n", status); + if(!WIFEXITED(status) || WEXITSTATUS(status) != (uint8_t)exit_val){ + printf("Child process terminated with incorrect return code!\n"); + return -1; + } + + return 0; +} + +static int +test_exit(void) +{ + int test_vals[] = { 0, 1, 2, 255, -1 }; + unsigned i; + for (i = 0; i < sizeof(test_vals) / sizeof(test_vals[0]); i++){ + if (test_exit_val(test_vals[i]) < 0) + return -1; + } + printf("%s Passed\n", __func__); + return 0; +} + +#endif + +int +test_debug(void) +{ + rte_dump_stack(); + rte_dump_registers(); + if (test_panic() < 0) + return -1; + if (test_exit() < 0) + return -1; + return 0; +} diff --git a/app/test/test_eal_flags.c b/app/test/test_eal_flags.c new file mode 100644 index 0000000000..37b9aaf1dd --- /dev/null +++ b/app/test/test_eal_flags.c @@ -0,0 +1,303 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ +#include + +#include + +#include "test.h" + +#ifndef RTE_EXEC_ENV_BAREMETAL +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "process.h" + +#define mp_flag "--proc-type=secondary" +#define no_hpet "--no-hpet" +#define no_huge "--no-huge" +#define no_shconf "--no-shconf" +#define launch_proc(ARGV) process_dup(ARGV, \ + sizeof(ARGV)/(sizeof(ARGV[0])), __func__) + +/* + * Test that the app doesn't run without invalid blacklist option. + * Final test ensures it does run with valid options as sanity check + */ +static int +test_invalid_b_flag(void) +{ + const char *blinval[][8] = { + {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "error"}, + {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "0:0:0"}, + {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "0:error:0.1"}, + {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "0:0:0.1error"}, + {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "error0:0:0.1"}, + {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "0:0:0.1.2"}, + }; + /* Test with valid blacklist option */ + const char *blval[] = {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "FF:09:0B.3"}; + + int i; + + for (i = 0; i != sizeof (blinval) / sizeof (blinval[0]); i++) { + if (launch_proc(blinval[i]) == 0) { + printf("Error - process did run ok with invalid " + "blacklist parameter\n"); + return -1; + } + } + if (launch_proc(blval) != 0) { + printf("Error - process did not run ok with valid blacklist value\n"); + return -1; + } + return 0; +} + + +/* + * Test that the app doesn't run with invalid -r option. + */ +static int +test_invalid_r_flag(void) +{ + const char *rinval[][8] = { + {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "error"}, + {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "0"}, + {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "-1"}, + {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "17"}, + }; + /* Test with valid blacklist option */ + const char *rval[] = {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "16"}; + + int i; + + for (i = 0; i != sizeof (rinval) / sizeof (rinval[0]); i++) { + if (launch_proc(rinval[i]) == 0) { + printf("Error - process did run ok with invalid " + "-r (rank) parameter\n"); + return -1; + } + } + if (launch_proc(rval) != 0) { + printf("Error - process did not run ok with valid -r (rank) value\n"); + return -1; + } + return 0; +} + +/* + * Test that the app doesn't run without the coremask flag. 
In all cases + * should give an error and fail to run + */ +static int +test_missing_c_flag(void) +{ + /* -c flag but no coremask value */ + const char *argv1[] = { prgname, mp_flag, "-n", "3", "-c"}; + /* No -c flag at all */ + const char *argv2[] = { prgname, mp_flag, "-n", "3"}; + /* bad coremask value */ + const char *argv3[] = { prgname, mp_flag, "-n", "3", "-c", "error" }; + /* sanity check of tests - valid coremask value */ + const char *argv4[] = { prgname, mp_flag, "-n", "3", "-c", "1" }; + + if (launch_proc(argv1) == 0 + || launch_proc(argv2) == 0 + || launch_proc(argv3) == 0) { + printf("Error - process ran without error when missing -c flag\n"); + return -1; + } + if (launch_proc(argv4) != 0) { + printf("Error - process did not run ok with valid coremask value\n"); + return -1; + } + return 0; +} + +/* + * Test that the app doesn't run without the -n flag. In all cases + * should give an error and fail to run. + * Since -n is not compulsory for MP, we instead use --no-huge and --no-shconf + * flags. + */ +static int +test_missing_n_flag(void) +{ + /* -n flag but no value */ + const char *argv1[] = { prgname, no_huge, no_shconf, "-c", "1", "-n"}; + /* No -n flag at all */ + const char *argv2[] = { prgname, no_huge, no_shconf, "-c", "1"}; + /* bad numeric value */ + const char *argv3[] = { prgname, no_huge, no_shconf, "-c", "1", "-n", "e" }; + /* out-of-range value */ + const char *argv4[] = { prgname, no_huge, no_shconf, "-c", "1", "-n", "9" }; + /* sanity test - check with good value */ + const char *argv5[] = { prgname, no_huge, no_shconf, "-c", "1", "-n", "2" }; + + if (launch_proc(argv1) == 0 + || launch_proc(argv2) == 0 + || launch_proc(argv3) == 0 + || launch_proc(argv4) == 0) { + printf("Error - process ran without error when missing -n flag\n"); + return -1; + } + if (launch_proc(argv5) != 0) { + printf("Error - process did not run ok with valid num-channel value\n"); + return -1; + } + return 0; +} + +/* + * Test that the app runs with HPET, and without HPET + */ +static int +test_no_hpet_flag(void) +{ + /* With --no-hpet */ + const char *argv1[] = {prgname, mp_flag, no_hpet, "-c", "1", "-n", "2"}; + /* Without --no-hpet */ + const char *argv2[] = {prgname, mp_flag, "-c", "1", "-n", "2"}; + + if (launch_proc(argv1) != 0) { + printf("Error - process did not run ok with --no-hpet flag\n"); + return -1; + } + if (launch_proc(argv2) != 0) { + printf("Error - process did not run ok without --no-hpet flag\n"); + return -1; + } + return 0; +} + +static int +test_misc_flags(void) +{ + /* check that some general flags don't prevent things from working. + * All cases, apart from the first, app should run. + * No futher testing of output done. 
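+ * Each argv array below is handed to launch_proc(), i.e. process_dup()
+ * from process.h, which spawns a copy of this test binary with that
+ * argument set and returns zero only when that invocation runs ok.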
+ */ + /* sanity check - failure with invalid option */ + const char *argv0[] = {prgname, mp_flag, "-c", "1", "--invalid-opt"}; + + /* With --no-pci */ + const char *argv1[] = {prgname, mp_flag, "-c", "1", "--no-pci"}; + /* With -v */ + const char *argv2[] = {prgname, mp_flag, "-c", "1", "-v"}; + /* With -m - ignored for secondary processes */ + const char *argv3[] = {prgname, mp_flag, "-c", "1", "-m", "32"}; + + if (launch_proc(argv0) == 0) { + printf("Error - process ran ok with invalid flag\n"); + return -1; + } + if (launch_proc(argv1) != 0) { + printf("Error - process did not run ok with --no-pci flag\n"); + return -1; + } + if (launch_proc(argv2) != 0) { + printf("Error - process did not run ok with -v flag\n"); + return -1; + } + if (launch_proc(argv3) != 0) { + printf("Error - process did not run ok with -m flag\n"); + return -1; + } + return 0; +} + +int +test_eal_flags(void) +{ + int ret = 0; + + ret = test_missing_c_flag(); + if (ret < 0) { + printf("Error in test_missing_c_flag()"); + return ret; + } + + ret = test_missing_n_flag(); + if (ret < 0) { + printf("Error in test_missing_n_flag()"); + return ret; + } + + ret = test_no_hpet_flag(); + if (ret < 0) { + printf("Error in test_no_hpet_flag()"); + return ret; + } + + ret = test_invalid_b_flag(); + if (ret < 0) { + printf("Error in test_invalid_b_flag()"); + return ret; + } + + ret = test_invalid_r_flag(); + if (ret < 0) { + printf("Error in test_invalid_r_flag()"); + return ret; + } + + ret = test_misc_flags(); + if (ret < 0) { + printf("Error in test_misc_flags()"); + return ret; + } + + return ret; +} + +#else +/* Baremetal version + * Multiprocess not applicable, so just return 0 always + */ +int +test_eal_flags(void) +{ + printf("Multi-process not possible for baremetal, cannot test EAL flags\n"); + return 0; +} + +#endif diff --git a/app/test/test_errno.c b/app/test/test_errno.c new file mode 100644 index 0000000000..4233dc1432 --- /dev/null +++ b/app/test/test_errno.c @@ -0,0 +1,110 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "test.h" + +int +test_errno(void) +{ + const char *rte_retval; + const char *libc_retval; + const char unknown_code_result[] = "Unknown error %d"; + char expected_libc_retval[sizeof(unknown_code_result)+3]; + + /* use a small selection of standard errors for testing */ + int std_errs[] = {EAGAIN, EBADF, EACCES, EINTR, EINVAL}; + /* test ALL registered RTE error codes for overlap */ + int rte_errs[] = {E_RTE_SECONDARY, E_RTE_NO_CONFIG, E_RTE_NO_TAILQ}; + unsigned i; + + rte_errno = 0; + if (rte_errno != 0) + return -1; + /* check for standard errors we return the same as libc */ + for (i = 0; i < sizeof(std_errs)/sizeof(std_errs[0]); i++){ + rte_retval = rte_strerror(std_errs[i]); + libc_retval = strerror(std_errs[i]); + printf("rte_strerror: '%s', strerror: '%s'\n", + rte_retval, libc_retval); + if (strcmp(rte_retval, libc_retval) != 0) + return -1; + } + /* for rte-specific errors ensure we return a different string + * and that the string for libc is for an unknown error + */ + for (i = 0; i < sizeof(rte_errs)/sizeof(rte_errs[0]); i++){ + rte_retval = rte_strerror(rte_errs[i]); + libc_retval = strerror(rte_errs[i]); + printf("rte_strerror: '%s', strerror: '%s'\n", + rte_retval, libc_retval); + if (strcmp(rte_retval, libc_retval) == 0) + return -1; + /* generate appropriate error string for unknown error number + * and then check that this is what we got back. If not, we have + * a duplicate error number that conflicts with errno.h */ + rte_snprintf(expected_libc_retval, sizeof(expected_libc_retval), + unknown_code_result, rte_errs[i]); + if (strcmp(expected_libc_retval, libc_retval) != 0){ + printf("Error, duplicate error code %d\n", rte_errs[i]); + return -1; + } + } + + /* ensure that beyond RTE_MAX_ERRNO, we always get an unknown code */ + rte_retval = rte_strerror(RTE_MAX_ERRNO + 1); + libc_retval = strerror(RTE_MAX_ERRNO + 1); + rte_snprintf(expected_libc_retval, sizeof(expected_libc_retval), + unknown_code_result, RTE_MAX_ERRNO + 1); + printf("rte_strerror: '%s', strerror: '%s'\n", + rte_retval, libc_retval); + if ((strcmp(rte_retval, libc_retval) != 0) || + (strcmp(expected_libc_retval, libc_retval) != 0)){ + printf("Failed test for RTE_MAX_ERRNO + 1 value\n"); + return -1; + } + + return 0; +} diff --git a/app/test/test_hash.c b/app/test/test_hash.c new file mode 100644 index 0000000000..5992fa3fa4 --- /dev/null +++ b/app/test/test_hash.c @@ -0,0 +1,1785 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "test.h" + +#ifdef RTE_LIBRTE_HASH + +/* Types of hash table performance test that can be performed */ +enum hash_test_t { + ADD_ON_EMPTY, /*< Add keys to empty table */ + DELETE_ON_EMPTY, /*< Attempt to delete keys from empty table */ + LOOKUP_ON_EMPTY, /*< Attempt to find keys in an empty table */ + ADD_UPDATE, /*< Add/update keys in a full table */ + DELETE, /*< Delete keys from a full table */ + LOOKUP /*< Find keys in a full table */ +}; + +/* Function type for hash table operations. */ +typedef int32_t (*hash_operation)(const struct rte_hash *h, const void *key); + +/* Structure to hold parameters used to run a hash table performance test */ +struct tbl_perf_test_params { + enum hash_test_t test_type; + uint32_t num_iterations; + uint32_t entries; + uint32_t bucket_entries; + uint32_t key_len; + rte_hash_function hash_func; + uint32_t hash_func_init_val; +}; + +#define ITERATIONS 10000 +#define LOCAL_FBK_HASH_ENTRIES_MAX (1 << 15) + +/******************************************************************************* + * Hash table performance test configuration section. 
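+ * Each initializer below fills one struct tbl_perf_test_params in field
+ * order: { test_type, num_iterations, entries, bucket_entries, key_len,
+ * hash_func, hash_func_init_val }. For example, the first row performs
+ * 1024 ADD_ON_EMPTY operations on an empty 1024-entry jhash table with
+ * one entry per bucket and 16-byte keys; run_all_tbl_perf_tests() walks
+ * the whole array.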
+ */ +struct tbl_perf_test_params tbl_perf_params[] = +{ +/* Small table, add */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ ADD_ON_EMPTY, 1024, 1024, 1, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 2, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 4, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 8, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 16, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 1, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 2, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 4, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 8, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 16, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 1, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 2, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 4, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 8, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 16, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 1, 64, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 2, 64, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 4, 64, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 8, 64, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 16, 64, rte_jhash, 0}, +/* Small table, update */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ ADD_UPDATE, ITERATIONS, 1024, 1, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 2, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 4, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 8, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 16, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 1, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 2, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 4, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 8, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 16, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 1, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 2, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 4, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 8, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 16, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 1, 64, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 2, 64, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 4, 64, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 8, 64, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 16, 64, rte_jhash, 0}, +/* Small table, lookup */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ LOOKUP, ITERATIONS, 1024, 1, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 2, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 4, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 8, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 16, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 1, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 2, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 4, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 8, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 16, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 1, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 2, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 4, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 8, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 16, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 1, 64, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 2, 64, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 4, 64, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 8, 64, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1024, 16, 64, rte_jhash, 0}, +/* Big table, add 
*/ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ ADD_ON_EMPTY, 1048576, 1048576, 1, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 2, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 4, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 8, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 16, 16, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 1, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 2, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 4, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 8, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 16, 32, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 1, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 2, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 4, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 8, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 16, 48, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 1, 64, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 2, 64, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 4, 64, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 8, 64, rte_jhash, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 16, 64, rte_jhash, 0}, +/* Big table, update */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ ADD_UPDATE, ITERATIONS, 1048576, 1, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 2, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 4, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 8, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 16, 16, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 1, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 2, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 4, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 8, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 16, 32, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 1, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 2, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 4, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 8, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 16, 48, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 1, 64, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 2, 64, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 4, 64, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 8, 64, rte_jhash, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 16, 64, rte_jhash, 0}, +/* Big table, lookup */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ LOOKUP, ITERATIONS, 1048576, 1, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 2, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 4, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 8, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 16, 16, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 1, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 2, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 4, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 8, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 16, 32, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 1, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 2, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 4, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 8, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 16, 48, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 1, 64, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 2, 64, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 
1048576, 4, 64, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 8, 64, rte_jhash, 0}, +{ LOOKUP, ITERATIONS, 1048576, 16, 64, rte_jhash, 0}, + +/* Small table, add */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ ADD_ON_EMPTY, 1024, 1024, 1, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 2, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 4, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 8, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 16, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 1, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 2, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 4, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 8, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 16, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 1, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 2, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 4, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 8, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 16, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 1, 64, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 2, 64, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 4, 64, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 8, 64, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1024, 1024, 16, 64, rte_hash_crc, 0}, +/* Small table, update */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ ADD_UPDATE, ITERATIONS, 1024, 1, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 2, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 4, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 8, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 16, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 1, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 2, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 4, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 8, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 16, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 1, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 2, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 4, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 8, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 16, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 1, 64, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 2, 64, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 4, 64, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 8, 64, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1024, 16, 64, rte_hash_crc, 0}, +/* Small table, lookup */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ LOOKUP, ITERATIONS, 1024, 1, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 2, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 4, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 8, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 16, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 1, 32, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 2, 32, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 4, 32, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 8, 32, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 16, 32, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 1, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 2, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 4, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 8, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 16, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 
1024, 1, 64, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 2, 64, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 4, 64, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 8, 64, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1024, 16, 64, rte_hash_crc, 0}, +/* Big table, add */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ ADD_ON_EMPTY, 1048576, 1048576, 1, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 2, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 4, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 8, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 16, 16, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 1, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 2, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 4, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 8, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 16, 32, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 1, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 2, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 4, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 8, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 16, 48, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 1, 64, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 2, 64, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 4, 64, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 8, 64, rte_hash_crc, 0}, +{ ADD_ON_EMPTY, 1048576, 1048576, 16, 64, rte_hash_crc, 0}, +/* Big table, update */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ ADD_UPDATE, ITERATIONS, 1048576, 1, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 2, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 4, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 8, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 16, 16, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 1, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 2, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 4, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 8, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 16, 32, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 1, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 2, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 4, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 8, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 16, 48, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 1, 64, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 2, 64, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 4, 64, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 8, 64, rte_hash_crc, 0}, +{ ADD_UPDATE, ITERATIONS, 1048576, 16, 64, rte_hash_crc, 0}, +/* Big table, lookup */ +/* Test type | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */ +{ LOOKUP, ITERATIONS, 1048576, 1, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 2, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 4, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 8, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 16, 16, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 1, 32, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 2, 32, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 4, 32, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 8, 32, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 16, 32, 
rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 1, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 2, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 4, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 8, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 16, 48, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 1, 64, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 2, 64, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 4, 64, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 8, 64, rte_hash_crc, 0}, +{ LOOKUP, ITERATIONS, 1048576, 16, 64, rte_hash_crc, 0}, +}; + +/******************************************************************************/ + +/******************************************************************************* + * Hash function performance test configuration section. Each performance test + * will be performed HASHTEST_ITERATIONS times. + * + * The five arrays below control what tests are performed. Every combination + * from the array entries is tested. + */ +#define HASHTEST_ITERATIONS 1000000 + +#ifdef RTE_MACHINE_CPUFLAG_SSE4_2 +static rte_hash_function hashtest_funcs[] = {rte_jhash, rte_hash_crc}; +#else +static rte_hash_function hashtest_funcs[] = {rte_jhash}; +#endif +static uint32_t hashtest_initvals[] = {0}; +static uint32_t hashtest_key_lens[] = {2, 4, 5, 6, 7, 8, 10, 11, 15, 16, 21, 31, 32, 33, 63, 64}; +/******************************************************************************/ + +/* + * Check condition and return an error if true. Assumes that "handle" is the + * name of the hash structure pointer to be freed. + */ +#define RETURN_IF_ERROR(cond, str, ...) do { \ + if (cond) { \ + printf("ERROR line %d: " str "\n", __LINE__, ##__VA_ARGS__); \ + if (handle) rte_hash_free(handle); \ + return -1; \ + } \ +} while(0) + +#define RETURN_IF_ERROR_FBK(cond, str, ...) do { \ + if (cond) { \ + printf("ERROR line %d: " str "\n", __LINE__, ##__VA_ARGS__); \ + if (handle) rte_fbk_hash_free(handle); \ + return -1; \ + } \ +} while(0) + +/* 5-tuple key type */ +struct flow_key { + uint32_t ip_src; + uint32_t ip_dst; + uint16_t port_src; + uint16_t port_dst; + uint8_t proto; +} __attribute__((packed)); + +/* + * Hash function that always returns the same value, to easily test what + * happens when a bucket is full. + */ +static uint32_t pseudo_hash(__attribute__((unused)) const void *keys, + __attribute__((unused)) uint32_t key_len, + __attribute__((unused)) uint32_t init_val) +{ + return 3; +} + +/* + * Print out result of unit test hash operation. 
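+ * The key bytes and the returned position are printed only when the file
+ * is built with UNIT_TEST_HASH_VERBOSE defined; otherwise the function
+ * below compiles to an empty stub so the unit tests stay quiet.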
+ */ +#if defined(UNIT_TEST_HASH_VERBOSE) +static void print_key_info(const char *msg, const struct flow_key *key, + int32_t pos) +{ + uint8_t *p = (uint8_t *)key; + unsigned i; + + printf("%s key:0x", msg); + for (i = 0; i < sizeof(struct flow_key); i++) { + printf("%02X", p[i]); + } + printf(" @ pos %d\n", pos); +} +#else +static void print_key_info(__attribute__((unused)) const char *msg, + __attribute__((unused)) const struct flow_key *key, + __attribute__((unused)) int32_t pos) +{ +} +#endif + +/* Keys used by unit test functions */ +static struct flow_key keys[5] = { { + .ip_src = IPv4(0x03, 0x02, 0x01, 0x00), + .ip_dst = IPv4(0x07, 0x06, 0x05, 0x04), + .port_src = 0x0908, + .port_dst = 0x0b0a, + .proto = 0x0c, +}, { + .ip_src = IPv4(0x13, 0x12, 0x11, 0x10), + .ip_dst = IPv4(0x17, 0x16, 0x15, 0x14), + .port_src = 0x1918, + .port_dst = 0x1b1a, + .proto = 0x1c, +}, { + .ip_src = IPv4(0x23, 0x22, 0x21, 0x20), + .ip_dst = IPv4(0x27, 0x26, 0x25, 0x24), + .port_src = 0x2928, + .port_dst = 0x2b2a, + .proto = 0x2c, +}, { + .ip_src = IPv4(0x33, 0x32, 0x31, 0x30), + .ip_dst = IPv4(0x37, 0x36, 0x35, 0x34), + .port_src = 0x3938, + .port_dst = 0x3b3a, + .proto = 0x3c, +}, { + .ip_src = IPv4(0x43, 0x42, 0x41, 0x40), + .ip_dst = IPv4(0x47, 0x46, 0x45, 0x44), + .port_src = 0x4948, + .port_dst = 0x4b4a, + .proto = 0x4c, +} }; + +/* Parameters used for hash table in unit test functions. Name set later. */ +static struct rte_hash_parameters ut_params = { + .entries = 64, + .bucket_entries = 4, + .key_len = sizeof(struct flow_key), /* 13 */ + .hash_func = rte_jhash, + .hash_func_init_val = 0, + .socket_id = 0, +}; + +/* + * Basic sequence of operations for a single key: + * - add + * - lookup (hit) + * - delete + * - lookup (miss) + */ +static int test_add_delete(void) +{ + struct rte_hash *handle; + int pos0, expectedPos0; + + ut_params.name = "test1"; + handle = rte_hash_create(&ut_params); + RETURN_IF_ERROR(handle == NULL, "hash creation failed"); + + pos0 = rte_hash_add_key(handle, &keys[0]); + print_key_info("Add", &keys[0], pos0); + RETURN_IF_ERROR(pos0 < 0, "failed to add key (pos0=%d)", pos0); + expectedPos0 = pos0; + + pos0 = rte_hash_lookup(handle, &keys[0]); + print_key_info("Lkp", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != expectedPos0, + "failed to find key (pos0=%d)", pos0); + + pos0 = rte_hash_del_key(handle, &keys[0]); + print_key_info("Del", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != expectedPos0, + "failed to delete key (pos0=%d)", pos0); + + pos0 = rte_hash_lookup(handle, &keys[0]); + print_key_info("Lkp", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != -ENOENT, + "fail: found key after deleting! 
(pos0=%d)", pos0); + + rte_hash_free(handle); + return 0; +} + +/* + * Sequence of operations for a single key: + * - delete: miss + * - add + * - lookup: hit + * - add: update + * - lookup: hit (updated data) + * - delete: hit + * - delete: miss + * - lookup: miss + */ +static int test_add_update_delete(void) +{ + struct rte_hash *handle; + int pos0, expectedPos0; + + ut_params.name = "test2"; + handle = rte_hash_create(&ut_params); + RETURN_IF_ERROR(handle == NULL, "hash creation failed"); + + pos0 = rte_hash_del_key(handle, &keys[0]); + print_key_info("Del", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != -ENOENT, + "fail: found non-existent key (pos0=%d)", pos0); + + pos0 = rte_hash_add_key(handle, &keys[0]); + print_key_info("Add", &keys[0], pos0); + RETURN_IF_ERROR(pos0 < 0, "failed to add key (pos0=%d)", pos0); + expectedPos0 = pos0; + + pos0 = rte_hash_lookup(handle, &keys[0]); + print_key_info("Lkp", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != expectedPos0, + "failed to find key (pos0=%d)", pos0); + + pos0 = rte_hash_add_key(handle, &keys[0]); + print_key_info("Add", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != expectedPos0, + "failed to re-add key (pos0=%d)", pos0); + + pos0 = rte_hash_lookup(handle, &keys[0]); + print_key_info("Lkp", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != expectedPos0, + "failed to find key (pos0=%d)", pos0); + + pos0 = rte_hash_del_key(handle, &keys[0]); + print_key_info("Del", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != expectedPos0, + "failed to delete key (pos0=%d)", pos0); + + pos0 = rte_hash_del_key(handle, &keys[0]); + print_key_info("Del", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != -ENOENT, + "fail: deleted already deleted key (pos0=%d)", pos0); + + pos0 = rte_hash_lookup(handle, &keys[0]); + print_key_info("Lkp", &keys[0], pos0); + RETURN_IF_ERROR(pos0 != -ENOENT, + "fail: found key after deleting! (pos0=%d)", pos0); + + rte_hash_free(handle); + return 0; +} + +/* + * Sequence of operations for find existing hash table + * + * - create table + * - find existing table: hit + * - find non-existing table: miss + * + */ +static int test_hash_find_existing(void) +{ + struct rte_hash *handle = NULL, *result = NULL; + + /* Create hash table. */ + ut_params.name = "hash_find_existing"; + handle = rte_hash_create(&ut_params); + RETURN_IF_ERROR(handle == NULL, "hash creation failed"); + + /* Try to find existing hash table */ + result = rte_hash_find_existing("hash_find_existing"); + RETURN_IF_ERROR(result != handle, "could not find existing hash table"); + + /* Try to find non-existing hash table */ + result = rte_hash_find_existing("hash_find_non_existing"); + RETURN_IF_ERROR(!(result == NULL), "found table that shouldn't exist"); + + /* Cleanup. 
*/ + rte_hash_free(handle); + + return 0; +} + +/* + * Sequence of operations for 5 keys + * - add keys + * - lookup keys: hit + * - add keys (update) + * - lookup keys: hit (updated data) + * - delete keys : hit + * - lookup keys: miss + */ +static int test_five_keys(void) +{ + struct rte_hash *handle; + const void *key_array[5] = {0}; + int pos[5]; + int expected_pos[5]; + unsigned i; + int ret; + + ut_params.name = "test3"; + handle = rte_hash_create(&ut_params); + RETURN_IF_ERROR(handle == NULL, "hash creation failed"); + + /* Add */ + for (i = 0; i < 5; i++) { + pos[i] = rte_hash_add_key(handle, &keys[i]); + print_key_info("Add", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] < 0, + "failed to add key (pos[%u]=%d)", i, pos[i]); + expected_pos[i] = pos[i]; + } + + /* Lookup */ + for(i = 0; i < 5; i++) + key_array[i] = &keys[i]; + + ret = rte_hash_lookup_multi(handle, &key_array[0], 5, (int32_t *)pos); + if(ret == 0) + for(i = 0; i < 5; i++) { + print_key_info("Lkp", key_array[i], pos[i]); + RETURN_IF_ERROR(pos[i] != expected_pos[i], + "failed to find key (pos[%u]=%d)", i, pos[i]); + } + + /* Add - update */ + for (i = 0; i < 5; i++) { + pos[i] = rte_hash_add_key(handle, &keys[i]); + print_key_info("Add", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] != expected_pos[i], + "failed to add key (pos[%u]=%d)", i, pos[i]); + } + + /* Lookup */ + for (i = 0; i < 5; i++) { + pos[i] = rte_hash_lookup(handle, &keys[i]); + print_key_info("Lkp", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] != expected_pos[i], + "failed to find key (pos[%u]=%d)", i, pos[i]); + } + + /* Delete */ + for (i = 0; i < 5; i++) { + pos[i] = rte_hash_del_key(handle, &keys[i]); + print_key_info("Del", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] != expected_pos[i], + "failed to delete key (pos[%u]=%d)", i, pos[i]); + } + + /* Lookup */ + for (i = 0; i < 5; i++) { + pos[i] = rte_hash_lookup(handle, &keys[i]); + print_key_info("Lkp", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] != -ENOENT, + "failed to find key (pos[%u]=%d)", i, pos[i]); + } + + rte_hash_free(handle); + + return 0; +} + +/* + * Add keys to the same bucket until bucket full. 
+ * - add 5 keys to the same bucket (hash created with 4 keys per bucket): + * first 4 successful, 5th unsuccessful + * - lookup the 5 keys: 4 hits, 1 miss + * - add the 5 keys again: 4 OK, one error as bucket is full + * - lookup the 5 keys: 4 hits (updated data), 1 miss + * - delete the 5 keys: 5 OK (even if the 5th is not in the table) + * - lookup the 5 keys: 5 misses + * - add the 5th key: OK + * - lookup the 5th key: hit + */ +static int test_full_bucket(void) +{ + struct rte_hash_parameters params_pseudo_hash = { + .name = "test4", + .entries = 64, + .bucket_entries = 4, + .key_len = sizeof(struct flow_key), /* 13 */ + .hash_func = pseudo_hash, + .hash_func_init_val = 0, + .socket_id = 0, + }; + struct rte_hash *handle; + int pos[5]; + int expected_pos[5]; + unsigned i; + + handle = rte_hash_create(¶ms_pseudo_hash); + RETURN_IF_ERROR(handle == NULL, "hash creation failed"); + + /* Fill bucket*/ + for (i = 0; i < 4; i++) { + pos[i] = rte_hash_add_key(handle, &keys[i]); + print_key_info("Add", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] < 0, + "failed to add key (pos[%u]=%d)", i, pos[i]); + expected_pos[i] = pos[i]; + } + /* This shouldn't work because the bucket is full */ + pos[4] = rte_hash_add_key(handle, &keys[4]); + print_key_info("Add", &keys[4], pos[4]); + RETURN_IF_ERROR(pos[4] != -ENOSPC, + "fail: added key to full bucket (pos[4]=%d)", pos[4]); + + /* Lookup */ + for (i = 0; i < 4; i++) { + pos[i] = rte_hash_lookup(handle, &keys[i]); + print_key_info("Lkp", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] != expected_pos[i], + "failed to find key (pos[%u]=%d)", i, pos[i]); + } + pos[4] = rte_hash_lookup(handle, &keys[4]); + print_key_info("Lkp", &keys[4], pos[4]); + RETURN_IF_ERROR(pos[4] != -ENOENT, + "fail: found non-existent key (pos[4]=%d)", pos[4]); + + /* Add - update */ + for (i = 0; i < 4; i++) { + pos[i] = rte_hash_add_key(handle, &keys[i]); + print_key_info("Add", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] != expected_pos[i], + "failed to add key (pos[%u]=%d)", i, pos[i]); + } + pos[4] = rte_hash_add_key(handle, &keys[4]); + print_key_info("Add", &keys[4], pos[4]); + RETURN_IF_ERROR(pos[4] != -ENOSPC, + "fail: added key to full bucket (pos[4]=%d)", pos[4]); + + /* Lookup */ + for (i = 0; i < 4; i++) { + pos[i] = rte_hash_lookup(handle, &keys[i]); + print_key_info("Lkp", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] != expected_pos[i], + "failed to find key (pos[%u]=%d)", i, pos[i]); + } + pos[4] = rte_hash_lookup(handle, &keys[4]); + print_key_info("Lkp", &keys[4], pos[4]); + RETURN_IF_ERROR(pos[4] != -ENOENT, + "fail: found non-existent key (pos[4]=%d)", pos[4]); + + /* Delete 1 key, check other keys are still found */ + pos[1] = rte_hash_del_key(handle, &keys[1]); + print_key_info("Del", &keys[1], pos[1]); + RETURN_IF_ERROR(pos[1] != expected_pos[1], + "failed to delete key (pos[1]=%d)", pos[1]); + pos[3] = rte_hash_lookup(handle, &keys[3]); + print_key_info("Lkp", &keys[3], pos[3]); + RETURN_IF_ERROR(pos[3] != expected_pos[3], + "failed lookup after deleting key from same bucket " + "(pos[3]=%d)", pos[3]); + + /* Go back to previous state */ + pos[1] = rte_hash_add_key(handle, &keys[1]); + print_key_info("Add", &keys[1], pos[1]); + expected_pos[1] = pos[1]; + RETURN_IF_ERROR(pos[1] < 0, "failed to add key (pos[1]=%d)", pos[1]); + + /* Delete */ + for (i = 0; i < 4; i++) { + pos[i] = rte_hash_del_key(handle, &keys[i]); + print_key_info("Del", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] != expected_pos[i], + "failed to delete key (pos[%u]=%d)", i, pos[i]); + } + pos[4] 
= rte_hash_del_key(handle, &keys[4]); + print_key_info("Del", &keys[4], pos[4]); + RETURN_IF_ERROR(pos[4] != -ENOENT, + "fail: deleted non-existent key (pos[4]=%d)", pos[4]); + + /* Lookup */ + for (i = 0; i < 4; i++) { + pos[i] = rte_hash_lookup(handle, &keys[i]); + print_key_info("Lkp", &keys[i], pos[i]); + RETURN_IF_ERROR(pos[i] != -ENOENT, + "fail: found non-existent key (pos[%u]=%d)", i, pos[i]); + } + + /* Add and lookup the 5th key */ + pos[4] = rte_hash_add_key(handle, &keys[4]); + print_key_info("Add", &keys[4], pos[4]); + RETURN_IF_ERROR(pos[4] < 0, "failed to add key (pos[4]=%d)", pos[4]); + expected_pos[4] = pos[4]; + pos[4] = rte_hash_lookup(handle, &keys[4]); + print_key_info("Lkp", &keys[4], pos[4]); + RETURN_IF_ERROR(pos[4] != expected_pos[4], + "failed to find key (pos[4]=%d)", pos[4]); + + rte_hash_free(handle); + + /* Cover the NULL case. */ + rte_hash_free(0); + return 0; +} + +/* + * To help print out name of hash functions. + */ +static const char *get_hash_name(rte_hash_function f) +{ + if (f == rte_jhash) + return "jhash"; + + if (f == rte_hash_crc) + return "rte_hash_crc"; + + return "UnknownHash"; +} + +/* + * Find average of array of numbers. + */ +static double +get_avg(const uint32_t *array, uint32_t size) +{ + double sum = 0; + unsigned i; + for (i = 0; i < size; i++) + sum += array[i]; + return sum / (double)size; +} + +/* + * Do a single performance test, of one type of operation. + * + * @param h + * hash table to run test on + * @param func + * function to call (add, delete or lookup function) + * @param avg_occupancy + * The average number of entries in each bucket of the hash table + * @param invalid_pos_count + * The amount of errors (e.g. due to a full bucket). + * @return + * The average number of ticks per hash function call. A negative number + * signifies failure. + */ +static double +run_single_tbl_perf_test(const struct rte_hash *h, hash_operation func, + const struct tbl_perf_test_params *params, double *avg_occupancy, + uint32_t *invalid_pos_count) +{ + uint64_t begin, end, ticks = 0; + uint8_t *key = NULL; + uint32_t *bucket_occupancies = NULL; + uint32_t num_buckets, i, j; + int32_t pos; + + /* Initialise */ + num_buckets = params->entries / params->bucket_entries; + key = (uint8_t *) rte_zmalloc("hash key", + params->key_len * sizeof(uint8_t), 16); + if (key == NULL) + return -1; + + bucket_occupancies = (uint32_t *) rte_zmalloc("bucket occupancies", + num_buckets * sizeof(uint32_t), 16); + if (bucket_occupancies == NULL) { + rte_free(key); + return -1; + } + + ticks = 0; + *invalid_pos_count = 0; + + for (i = 0; i < params->num_iterations; i++) { + /* Prepare inputs for the current iteration */ + for (j = 0; j < params->key_len; j++) + key[j] = (uint8_t) rte_rand(); + + /* Perform operation, and measure time it takes */ + begin = rte_rdtsc(); + pos = func(h, key); + end = rte_rdtsc(); + ticks += end - begin; + + /* Other work per iteration */ + if (pos < 0) + *invalid_pos_count += 1; + else + bucket_occupancies[pos / params->bucket_entries]++; + } + *avg_occupancy = get_avg(bucket_occupancies, num_buckets); + + rte_free(bucket_occupancies); + rte_free(key); + + return (double)ticks / params->num_iterations; +} + +/* + * To help print out what tests are being done. 
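+ * Maps each enum hash_test_t value to the operation label printed in the
+ * performance results table by run_tbl_perf_test().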
+ */ +static const char * +get_tbl_perf_test_desc(enum hash_test_t type) +{ + switch (type){ + case ADD_ON_EMPTY: return "Add on Empty"; + case DELETE_ON_EMPTY: return "Delete on Empty"; + case LOOKUP_ON_EMPTY: return "Lookup on Empty"; + case ADD_UPDATE: return "Add Update"; + case DELETE: return "Delete"; + case LOOKUP: return "Lookup"; + default: return "UNKNOWN"; + } +} + +/* + * Run a hash table performance test based on params. + */ +static int +run_tbl_perf_test(struct tbl_perf_test_params *params) +{ + static unsigned calledCount = 5; + struct rte_hash_parameters hash_params = { + .entries = params->entries, + .bucket_entries = params->bucket_entries, + .key_len = params->key_len, + .hash_func = params->hash_func, + .hash_func_init_val = params->hash_func_init_val, + .socket_id = 0, + }; + struct rte_hash *handle; + double avg_occupancy = 0, ticks = 0; + uint32_t num_iterations, invalid_pos; + char name[RTE_HASH_NAMESIZE]; + char hashname[RTE_HASH_NAMESIZE]; + + rte_snprintf(name, 32, "test%u", calledCount++); + hash_params.name = name; + + handle = rte_hash_create(&hash_params); + RETURN_IF_ERROR(handle == NULL, "hash creation failed"); + + switch (params->test_type){ + case ADD_ON_EMPTY: + ticks = run_single_tbl_perf_test(handle, rte_hash_add_key, + params, &avg_occupancy, &invalid_pos); + break; + case DELETE_ON_EMPTY: + ticks = run_single_tbl_perf_test(handle, rte_hash_del_key, + params, &avg_occupancy, &invalid_pos); + break; + case LOOKUP_ON_EMPTY: + ticks = run_single_tbl_perf_test(handle, rte_hash_lookup, + params, &avg_occupancy, &invalid_pos); + break; + case ADD_UPDATE: + num_iterations = params->num_iterations; + params->num_iterations = params->entries; + run_single_tbl_perf_test(handle, rte_hash_add_key, params, + &avg_occupancy, &invalid_pos); + params->num_iterations = num_iterations; + ticks = run_single_tbl_perf_test(handle, rte_hash_add_key, + params, &avg_occupancy, &invalid_pos); + break; + case DELETE: + num_iterations = params->num_iterations; + params->num_iterations = params->entries; + run_single_tbl_perf_test(handle, rte_hash_add_key, params, + &avg_occupancy, &invalid_pos); + + params->num_iterations = num_iterations; + ticks = run_single_tbl_perf_test(handle, rte_hash_del_key, + params, &avg_occupancy, &invalid_pos); + break; + case LOOKUP: + num_iterations = params->num_iterations; + params->num_iterations = params->entries; + run_single_tbl_perf_test(handle, rte_hash_add_key, params, + &avg_occupancy, &invalid_pos); + + params->num_iterations = num_iterations; + ticks = run_single_tbl_perf_test(handle, rte_hash_lookup, + params, &avg_occupancy, &invalid_pos); + break; + default: return -1; + } + + rte_snprintf(hashname, RTE_HASH_NAMESIZE, "%s", get_hash_name(params->hash_func)); + + printf("%-12s, %-15s, %-16u, %-7u, %-18u, %-8u, %-19.2f, %.2f\n", + hashname, + get_tbl_perf_test_desc(params->test_type), + (unsigned) params->key_len, + (unsigned) params->entries, + (unsigned) params->bucket_entries, + (unsigned) invalid_pos, + avg_occupancy, + ticks + ); + + /* Free */ + rte_hash_free(handle); + return 0; +} + +/* + * Run all hash table performance tests. + */ +static int run_all_tbl_perf_tests(void) +{ + unsigned i; + + printf(" *** Hash table performance test results ***\n"); + printf("Hash Func. , Operation , Key size (bytes), Entries, " + "Entries per bucket, Errors , Avg. 
bucket entries, Ticks/Op.\n"); + + /* Loop through every combination of test parameters */ + for (i = 0; + i < sizeof(tbl_perf_params) / sizeof(struct tbl_perf_test_params); + i++) { + + /* Perform test */ + if (run_tbl_perf_test(&tbl_perf_params[i]) < 0) + return -1; + } + return 0; +} + +/* + * Test a hash function. + */ +static void run_hash_func_test(rte_hash_function f, uint32_t init_val, + uint32_t key_len) +{ + static uint8_t key[RTE_HASH_KEY_LENGTH_MAX]; + uint64_t ticks = 0, start, end; + unsigned i, j; + + for (i = 0; i < HASHTEST_ITERATIONS; i++) { + + for (j = 0; j < key_len; j++) + key[j] = (uint8_t) rte_rand(); + + start = rte_rdtsc(); + f(key, key_len, init_val); + end = rte_rdtsc(); + ticks += end - start; + } + + printf("%-12s, %-18u, %-13u, %.02f\n", get_hash_name(f), (unsigned) key_len, + (unsigned) init_val, (double)ticks / HASHTEST_ITERATIONS); +} + +/* + * Test all hash functions. + */ +static void run_hash_func_tests(void) +{ + unsigned i, j, k; + + printf("\n\n *** Hash function performance test results ***\n"); + printf(" Number of iterations for each test = %d\n", + HASHTEST_ITERATIONS); + printf("Hash Func. , Key Length (bytes), Initial value, Ticks/Op.\n"); + + for (i = 0; + i < sizeof(hashtest_funcs) / sizeof(rte_hash_function); + i++) { + for (j = 0; + j < sizeof(hashtest_initvals) / sizeof(uint32_t); + j++) { + for (k = 0; + k < sizeof(hashtest_key_lens) / sizeof(uint32_t); + k++) { + run_hash_func_test(hashtest_funcs[i], + hashtest_initvals[j], + hashtest_key_lens[k]); + } + } + } +} + +/******************************************************************************/ +static int +fbk_hash_unit_test(void) +{ + struct rte_fbk_hash_params params = { + .name = "fbk_hash_test", + .entries = LOCAL_FBK_HASH_ENTRIES_MAX, + .entries_per_bucket = 4, + .socket_id = 0, + }; + + struct rte_fbk_hash_params invalid_params_1 = { + .name = "invalid_1", + .entries = LOCAL_FBK_HASH_ENTRIES_MAX + 1, /* Not power of 2 */ + .entries_per_bucket = 4, + .socket_id = 0, + }; + + struct rte_fbk_hash_params invalid_params_2 = { + .name = "invalid_4", + .entries = 4, + .entries_per_bucket = 3, /* Not power of 2 */ + .socket_id = 0, + }; + + struct rte_fbk_hash_params invalid_params_3 = { + .name = "invalid_2", + .entries = 0, /* Entries is 0 */ + .entries_per_bucket = 4, + .socket_id = 0, + }; + + struct rte_fbk_hash_params invalid_params_4 = { + .name = "invalid_3", + .entries = LOCAL_FBK_HASH_ENTRIES_MAX, + .entries_per_bucket = 0, /* Entries per bucket is 0 */ + .socket_id = 0, + }; + + struct rte_fbk_hash_params invalid_params_5 = { + .name = "invalid_4", + .entries = 4, + .entries_per_bucket = 8, /* Entries per bucket > entries */ + .socket_id = 0, + }; + + struct rte_fbk_hash_params invalid_params_6 = { + .name = "invalid_5", + .entries = RTE_FBK_HASH_ENTRIES_MAX * 2, /* Entries > max allowed */ + .entries_per_bucket = 4, + .socket_id = 0, + }; + + struct rte_fbk_hash_params params_jhash = { + .name = "valid", + .entries = LOCAL_FBK_HASH_ENTRIES_MAX, + .entries_per_bucket = 4, + .socket_id = 0, + .hash_func = rte_jhash_1word, /* Tests for different hash_func */ + .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT, + }; + + struct rte_fbk_hash_params params_nohash = { + .name = "valid nohash", + .entries = LOCAL_FBK_HASH_ENTRIES_MAX, + .entries_per_bucket = 4, + .socket_id = 0, + .hash_func = 0, /* Tests for null hash_func */ + .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT, + }; + + struct rte_fbk_hash_table *handle; + uint32_t keys[5] = + {0xc6e18639, 0xe67c201c, 0xd4c8cffd, 0x44728691, 
0xd5430fa9};
+ uint16_t vals[5] = {28108, 5699, 38490, 2166, 61571};
+ int status;
+ unsigned i;
+ double used_entries;
+
+ /* Try creating hashes with invalid parameters */
+ handle = rte_fbk_hash_create(&invalid_params_1);
+ RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+ handle = rte_fbk_hash_create(&invalid_params_2);
+ RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+ handle = rte_fbk_hash_create(&invalid_params_3);
+ RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+ handle = rte_fbk_hash_create(&invalid_params_4);
+ RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+ handle = rte_fbk_hash_create(&invalid_params_5);
+ RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+ handle = rte_fbk_hash_create(&invalid_params_6);
+ RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+ /* Create empty jhash hash. */
+ handle = rte_fbk_hash_create(&params_jhash);
+ RETURN_IF_ERROR_FBK(handle == NULL, "fbk jhash hash creation failed");
+
+ /* Cleanup. */
+ rte_fbk_hash_free(handle);
+
+ /* Create empty nohash hash. */
+ handle = rte_fbk_hash_create(&params_nohash);
+ RETURN_IF_ERROR_FBK(handle == NULL, "fbk nohash hash creation failed");
+
+ /* Cleanup. */
+ rte_fbk_hash_free(handle);
+
+ /* Create empty hash. */
+ handle = rte_fbk_hash_create(&params);
+ RETURN_IF_ERROR_FBK(handle == NULL, "fbk hash creation failed");
+
+ used_entries = rte_fbk_hash_get_load_factor(handle) * LOCAL_FBK_HASH_ENTRIES_MAX;
+ RETURN_IF_ERROR_FBK((unsigned)used_entries != 0, \
+ "load factor right after creation is not zero but it should be");
+ /* Add keys. */
+ for (i = 0; i < 5; i++) {
+ status = rte_fbk_hash_add_key(handle, keys[i], vals[i]);
+ RETURN_IF_ERROR_FBK(status != 0, "fbk hash add failed");
+ }
+
+ used_entries = rte_fbk_hash_get_load_factor(handle) * LOCAL_FBK_HASH_ENTRIES_MAX;
+ RETURN_IF_ERROR_FBK((unsigned)used_entries != (unsigned)((((double)5)/LOCAL_FBK_HASH_ENTRIES_MAX)*LOCAL_FBK_HASH_ENTRIES_MAX), \
+ "load factor now is not as expected");
+ /* Find value of added keys. */
+ for (i = 0; i < 5; i++) {
+ status = rte_fbk_hash_lookup(handle, keys[i]);
+ RETURN_IF_ERROR_FBK(status != vals[i],
+ "fbk hash lookup failed");
+ }
+
+ /* Change value of added keys. */
+ for (i = 0; i < 5; i++) {
+ status = rte_fbk_hash_add_key(handle, keys[i], vals[4 - i]);
+ RETURN_IF_ERROR_FBK(status != 0, "fbk hash update failed");
+ }
+
+ /* Find new values. */
+ for (i = 0; i < 5; i++) {
+ status = rte_fbk_hash_lookup(handle, keys[i]);
+ RETURN_IF_ERROR_FBK(status != vals[4-i],
+ "fbk hash lookup failed");
+ }
+
+ /* Delete keys individually. */
+ for (i = 0; i < 5; i++) {
+ status = rte_fbk_hash_delete_key(handle, keys[i]);
+ RETURN_IF_ERROR_FBK(status != 0, "fbk hash delete failed");
+ }
+
+ used_entries = rte_fbk_hash_get_load_factor(handle) * LOCAL_FBK_HASH_ENTRIES_MAX;
+ RETURN_IF_ERROR_FBK((unsigned)used_entries != 0, \
+ "load factor right after deletion is not zero but it should be");
+ /* Lookup should now fail. */
+ for (i = 0; i < 5; i++) {
+ status = rte_fbk_hash_lookup(handle, keys[i]);
+ RETURN_IF_ERROR_FBK(status == 0,
+ "fbk hash lookup should have failed");
+ }
+
+ /* Add keys again. */
+ for (i = 0; i < 5; i++) {
+ status = rte_fbk_hash_add_key(handle, keys[i], vals[i]);
+ RETURN_IF_ERROR_FBK(status != 0, "fbk hash add failed");
+ }
+
+ /* Make sure they were added. */
+ for (i = 0; i < 5; i++) {
+ status = rte_fbk_hash_lookup(handle, keys[i]);
+ RETURN_IF_ERROR_FBK(status != vals[i],
+ "fbk hash lookup failed");
+ }
+
+ /* Clear all entries. */
+ rte_fbk_hash_clear_all(handle);
+
+ /* Lookup should fail. */
+ for (i = 0; i < 5; i++) {
+ status = rte_fbk_hash_lookup(handle, keys[i]);
+ RETURN_IF_ERROR_FBK(status == 0,
+ "fbk hash lookup should have failed");
+ }
+
+ /* Cleanup. */
+ rte_fbk_hash_free(handle);
+
+ /* Cover the NULL case. */
+ rte_fbk_hash_free(0);
+
+ return 0;
+}
+
+/* Control operation of performance testing of fbk hash. */
+#define LOAD_FACTOR 0.667 /* How full to make the hash table. */
+#define TEST_SIZE 1000000 /* How many operations to time. */
+#define TEST_ITERATIONS 30 /* How many measurements to take. */
+#define ENTRIES (1 << 15) /* How many entries. */
+
+static int
+fbk_hash_perf_test(void)
+{
+ struct rte_fbk_hash_params params = {
+ .name = "fbk_hash_test",
+ .entries = ENTRIES,
+ .entries_per_bucket = 4,
+ .socket_id = 0,
+ };
+ struct rte_fbk_hash_table *handle;
+ uint32_t keys[ENTRIES] = {0};
+ unsigned indexes[TEST_SIZE];
+ uint64_t lookup_time = 0;
+ unsigned added = 0;
+ unsigned value = 0;
+ unsigned i, j;
+
+ handle = rte_fbk_hash_create(&params);
+ RETURN_IF_ERROR_FBK(handle == NULL, "fbk hash creation failed");
+
+ /* Generate random keys and values. */
+ for (i = 0; i < ENTRIES; i++) {
+ uint32_t key = (uint32_t)rte_rand();
+ key = ((uint64_t)key << 32) | (uint64_t)rte_rand();
+ uint16_t val = (uint16_t)rte_rand();
+
+ if (rte_fbk_hash_add_key(handle, key, val) == 0) {
+ keys[added] = key;
+ added++;
+ }
+ if (added > (LOAD_FACTOR * ENTRIES)) {
+ break;
+ }
+ }
+
+ for (i = 0; i < TEST_ITERATIONS; i++) {
+ uint64_t begin;
+ uint64_t end;
+
+ /* Generate random indexes into keys[] array. */
+ for (j = 0; j < TEST_SIZE; j++) {
+ indexes[j] = rte_rand() % added;
+ }
+
+ begin = rte_rdtsc();
+ /* Do lookups */
+ for (j = 0; j < TEST_SIZE; j++) {
+ value += rte_fbk_hash_lookup(handle, keys[indexes[j]]);
+ }
+ end = rte_rdtsc();
+ lookup_time += (double)(end - begin);
+ }
+
+ printf("\n\n *** FBK Hash function performance test results ***\n");
+ /*
+ * The use of the 'value' variable ensures that the hash lookup is not
+ * being optimised out by the compiler.
+ */
+ if (value != 0)
+ printf("Number of ticks per lookup = %g\n",
+ (double)lookup_time /
+ ((double)TEST_ITERATIONS * (double)TEST_SIZE));
+
+ rte_fbk_hash_free(handle);
+
+ return 0;
+}
+
+/*
+ * Sequence of operations for find existing fbk hash table
+ *
+ * - create table
+ * - find existing table: hit
+ * - find non-existing table: miss
+ *
+ */
+static int test_fbk_hash_find_existing(void)
+{
+ struct rte_fbk_hash_params params = {
+ .name = "fbk_hash_find_existing",
+ .entries = LOCAL_FBK_HASH_ENTRIES_MAX,
+ .entries_per_bucket = 4,
+ .socket_id = 0,
+ };
+ struct rte_fbk_hash_table *handle = NULL, *result = NULL;
+
+ /* Create hash table. */
+ handle = rte_fbk_hash_create(&params);
+ RETURN_IF_ERROR_FBK(handle == NULL, "fbk hash creation failed");
+
+ /* Try to find existing fbk hash table */
+ result = rte_fbk_hash_find_existing("fbk_hash_find_existing");
+ RETURN_IF_ERROR_FBK(result != handle, "could not find existing fbk hash table");
+
+ /* Try to find non-existing fbk hash table */
+ result = rte_fbk_hash_find_existing("fbk_hash_find_non_existing");
+ RETURN_IF_ERROR_FBK(!(result == NULL), "found fbk table that shouldn't exist");
+
+ /* Cleanup. */
+ rte_fbk_hash_free(handle);
+
+ return 0;
+}
+
+/*
+ * Do tests for hash creation with bad parameters.
+ */
+static int test_hash_creation_with_bad_parameters(void)
+{
+ struct rte_hash *handle;
+ struct rte_hash_parameters params;
+
+ handle = rte_hash_create(NULL);
+ if (handle != NULL) {
+ rte_hash_free(handle);
+ printf("Impossible creating hash successfully without any parameter\n");
+ return -1;
+ }
+
+ memcpy(&params, &ut_params, sizeof(params));
+ params.name = "creation_with_bad_parameters_0";
+ params.entries = RTE_HASH_ENTRIES_MAX + 1;
+ handle = rte_hash_create(&params);
+ if (handle != NULL) {
+ rte_hash_free(handle);
+ printf("Impossible creating hash successfully with entries in parameter exceeded\n");
+ return -1;
+ }
+
+ memcpy(&params, &ut_params, sizeof(params));
+ params.name = "creation_with_bad_parameters_1";
+ params.bucket_entries = RTE_HASH_BUCKET_ENTRIES_MAX + 1;
+ handle = rte_hash_create(&params);
+ if (handle != NULL) {
+ rte_hash_free(handle);
+ printf("Impossible creating hash successfully with bucket_entries in parameter exceeded\n");
+ return -1;
+ }
+
+ memcpy(&params, &ut_params, sizeof(params));
+ params.name = "creation_with_bad_parameters_2";
+ params.entries = params.bucket_entries - 1;
+ handle = rte_hash_create(&params);
+ if (handle != NULL) {
+ rte_hash_free(handle);
+ printf("Impossible creating hash successfully if entries less than bucket_entries in parameter\n");
+ return -1;
+ }
+
+ memcpy(&params, &ut_params, sizeof(params));
+ params.name = "creation_with_bad_parameters_3";
+ params.entries = params.entries - 1;
+ handle = rte_hash_create(&params);
+ if (handle != NULL) {
+ rte_hash_free(handle);
+ printf("Impossible creating hash successfully if entries in parameter is not power of 2\n");
+ return -1;
+ }
+
+ memcpy(&params, &ut_params, sizeof(params));
+ params.name = "creation_with_bad_parameters_4";
+ params.bucket_entries = params.bucket_entries - 1;
+ handle = rte_hash_create(&params);
+ if (handle != NULL) {
+ rte_hash_free(handle);
+ printf("Impossible creating hash successfully if bucket_entries in parameter is not power of 2\n");
+ return -1;
+ }
+
+ memcpy(&params, &ut_params, sizeof(params));
+ params.name = "creation_with_bad_parameters_5";
+ params.key_len = 0;
+ handle = rte_hash_create(&params);
+ if (handle != NULL) {
+ rte_hash_free(handle);
+ printf("Impossible creating hash successfully if key_len in parameter is zero\n");
+ return -1;
+ }
+
+ memcpy(&params, &ut_params, sizeof(params));
+ params.name = "creation_with_bad_parameters_6";
+ params.key_len = RTE_HASH_KEY_LENGTH_MAX + 1;
+ handle = rte_hash_create(&params);
+ if (handle != NULL) {
+ rte_hash_free(handle);
+ printf("Impossible creating hash successfully if key_len is greater than the maximum\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static uint8_t key[16] = {0x00, 0x01, 0x02, 0x03,
+ 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+ 0x0c, 0x0d, 0x0e, 0x0f};
+static struct rte_hash_parameters hash_params_ex = {
+ .name = NULL,
+ .entries = 64,
+ .bucket_entries = 4,
+ .key_len = 0,
+ .hash_func = NULL,
+ .hash_func_init_val = 0,
+ .socket_id = 0,
+};
+
+/*
+ * add/delete key with jhash2
+ */
+static int
+test_hash_add_delete_jhash2(void)
+{
+ int ret = -1;
+ struct rte_hash *handle;
+ int32_t pos1, pos2;
+
+ hash_params_ex.name = "hash_test_jhash2";
+ hash_params_ex.key_len = 4;
+ hash_params_ex.hash_func = (rte_hash_function)rte_jhash2;
+
+ handle = rte_hash_create(&hash_params_ex);
+ if (handle == NULL) {
+ printf("test_hash_add_delete_jhash2 fail to create hash\n");
+ goto fail_jhash2;
+ }
+ pos1 = rte_hash_add_key(handle, (void *)&key[0]);
+ if (pos1 < 0) {
+ printf("test_hash_add_delete_jhash2 fail to add hash 
key\n"); + goto fail_jhash2; + } + + pos2 = rte_hash_del_key(handle, (void *)&key[0]); + if (pos2 < 0 || pos1 != pos2) { + printf("test_hash_add_delete_jhash2 delete different key from being added\n"); + goto fail_jhash2; + } + ret = 0; + +fail_jhash2: + if (handle != NULL) + rte_hash_free(handle); + + return ret; +} + +/* + * add/delete (2) key with jhash2 + */ +static int +test_hash_add_delete_2_jhash2(void) +{ + int ret = -1; + struct rte_hash *handle; + int32_t pos1, pos2; + + hash_params_ex.name = "hash_test_2_jhash2"; + hash_params_ex.key_len = 8; + hash_params_ex.hash_func = (rte_hash_function)rte_jhash2; + + handle = rte_hash_create(&hash_params_ex); + if (handle == NULL) + goto fail_2_jhash2; + + pos1 = rte_hash_add_key(handle, (void *)&key[0]); + if (pos1 < 0) + goto fail_2_jhash2; + + pos2 = rte_hash_del_key(handle, (void *)&key[0]); + if (pos2 < 0 || pos1 != pos2) + goto fail_2_jhash2; + + ret = 0; + +fail_2_jhash2: + if (handle != NULL) + rte_hash_free(handle); + + return ret; +} + +static uint32_t +test_hash_jhash_1word(const void *key, uint32_t length, uint32_t initval) +{ + const uint32_t *k = key; + + length =length; + + return rte_jhash_1word(k[0], initval); +} + +static uint32_t +test_hash_jhash_2word(const void *key, uint32_t length, uint32_t initval) +{ + const uint32_t *k = key; + + length =length; + + return rte_jhash_2words(k[0], k[1], initval); +} + +static uint32_t +test_hash_jhash_3word(const void *key, uint32_t length, uint32_t initval) +{ + const uint32_t *k = key; + + length =length; + + return rte_jhash_3words(k[0], k[1], k[2], initval); +} + +/* + * add/delete key with jhash 1word + */ +static int +test_hash_add_delete_jhash_1word(void) +{ + int ret = -1; + struct rte_hash *handle; + int32_t pos1, pos2; + + hash_params_ex.name = "hash_test_jhash_1word"; + hash_params_ex.key_len = 4; + hash_params_ex.hash_func = test_hash_jhash_1word; + + handle = rte_hash_create(&hash_params_ex); + if (handle == NULL) + goto fail_jhash_1word; + + pos1 = rte_hash_add_key(handle, (void *)&key[0]); + if (pos1 < 0) + goto fail_jhash_1word; + + pos2 = rte_hash_del_key(handle, (void *)&key[0]); + if (pos2 < 0 || pos1 != pos2) + goto fail_jhash_1word; + + ret = 0; + +fail_jhash_1word: + if (handle != NULL) + rte_hash_free(handle); + + return ret; +} + +/* + * add/delete key with jhash 2word + */ +static int +test_hash_add_delete_jhash_2word(void) +{ + int ret = -1; + struct rte_hash *handle; + int32_t pos1, pos2; + + hash_params_ex.name = "hash_test_jhash_2word"; + hash_params_ex.key_len = 8; + hash_params_ex.hash_func = test_hash_jhash_2word; + + handle = rte_hash_create(&hash_params_ex); + if (handle == NULL) + goto fail_jhash_2word; + + pos1 = rte_hash_add_key(handle, (void *)&key[0]); + if (pos1 < 0) + goto fail_jhash_2word; + + pos2 = rte_hash_del_key(handle, (void *)&key[0]); + if (pos2 < 0 || pos1 != pos2) + goto fail_jhash_2word; + + ret = 0; + +fail_jhash_2word: + if (handle != NULL) + rte_hash_free(handle); + + return ret; +} + +/* + * add/delete key with jhash 3word + */ +static int +test_hash_add_delete_jhash_3word(void) +{ + int ret = -1; + struct rte_hash *handle; + int32_t pos1, pos2; + + hash_params_ex.name = "hash_test_jhash_3word"; + hash_params_ex.key_len = 12; + hash_params_ex.hash_func = test_hash_jhash_3word; + + handle = rte_hash_create(&hash_params_ex); + if (handle == NULL) + goto fail_jhash_3word; + + pos1 = rte_hash_add_key(handle, (void *)&key[0]); + if (pos1 < 0) + goto fail_jhash_3word; + + pos2 = rte_hash_del_key(handle, (void *)&key[0]); + if (pos2 < 
0 || pos1 != pos2) + goto fail_jhash_3word; + + ret = 0; + +fail_jhash_3word: + if (handle != NULL) + rte_hash_free(handle); + + return ret; +} + +/* + * Do all unit and performance tests. + */ +int test_hash(void) +{ + if (test_add_delete() < 0) + return -1; + if (test_hash_add_delete_jhash2() < 0) + return -1; + if (test_hash_add_delete_2_jhash2() < 0) + return -1; + if (test_hash_add_delete_jhash_1word() < 0) + return -1; + if (test_hash_add_delete_jhash_2word() < 0) + return -1; + if (test_hash_add_delete_jhash_3word() < 0) + return -1; + if (test_hash_find_existing() < 0) + return -1; + if (test_add_update_delete() < 0) + return -1; + if (test_five_keys() < 0) + return -1; + if (test_full_bucket() < 0) + return -1; + if (run_all_tbl_perf_tests() < 0) + return -1; + run_hash_func_tests(); + + if (test_fbk_hash_find_existing() < 0) + return -1; + if (fbk_hash_unit_test() < 0) + return -1; + if (fbk_hash_perf_test() < 0) + return -1; + if (test_hash_creation_with_bad_parameters() < 0) + return -1; + return 0; +} +#else + +int +test_hash(void) +{ + printf("The Hash library is not included in this build\n"); + return 0; +} + +#endif diff --git a/app/test/test_interrupts.c b/app/test/test_interrupts.c new file mode 100644 index 0000000000..c52ec712d0 --- /dev/null +++ b/app/test/test_interrupts.c @@ -0,0 +1,419 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include + +#include + +#include +#include +#include + +#include "test.h" + +#define TEST_INTERRUPT_CHECK_INTERVAL 1000 /* ms */ + +enum test_interrupt_handl_type { + TEST_INTERRUPT_HANDLE_INVALID, + TEST_INTERRUPT_HANDLE_VALID, + TEST_INTERRUPT_HANDLE_CASE1, + TEST_INTERRUPT_HANDLE_MAX +}; + +static volatile int flag; +static struct rte_intr_handle intr_handles[TEST_INTERRUPT_HANDLE_MAX]; + +#ifdef RTE_EXEC_ENV_LINUXAPP +union intr_pipefds{ + struct { + int pipefd[2]; + }; + struct { + int readfd; + int writefd; + }; +}; + +static union intr_pipefds pfds; + +static inline int +test_interrupt_handle_sanity_check(struct rte_intr_handle *intr_handle) +{ + if (!intr_handle || intr_handle->fd < 0) + return -1; + + return 0; +} + +static int +test_interrupt_init(void) +{ + if (pipe(pfds.pipefd) < 0) + return -1; + + intr_handles[TEST_INTERRUPT_HANDLE_INVALID].fd = -1; + intr_handles[TEST_INTERRUPT_HANDLE_INVALID].type = RTE_INTR_HANDLE_UNKNOWN; + + intr_handles[TEST_INTERRUPT_HANDLE_VALID].fd = pfds.readfd; + intr_handles[TEST_INTERRUPT_HANDLE_VALID].type = RTE_INTR_HANDLE_UNKNOWN; + + intr_handles[TEST_INTERRUPT_HANDLE_CASE1].fd = pfds.readfd; + intr_handles[TEST_INTERRUPT_HANDLE_CASE1].type = RTE_INTR_HANDLE_ALARM; + + return 0; +} + +static int +test_interrupt_deinit(void) +{ + close(pfds.pipefd[0]); + close(pfds.pipefd[1]); + + return 0; +} + +static int +test_interrupt_trigger_interrupt(void) +{ + if (write(pfds.writefd, "1", 1) < 0) + return -1; + + return 0; +} + +static int +test_interrupt_handle_compare(struct rte_intr_handle *intr_handle_l, + struct rte_intr_handle *intr_handle_r) +{ + if (!intr_handle_l || !intr_handle_r) + return -1; + + if (intr_handle_l->fd != intr_handle_r->fd || + intr_handle_l->type != intr_handle_r->type) + return -1; + + return 0; +} + +#else +/* to be implemented for baremetal later */ +static inline int +test_interrupt_handle_sanity_check(struct rte_intr_handle *intr_handle) +{ + RTE_SET_USED(intr_handle); + + return 0; +} + +static int +test_interrupt_init(void) +{ + return 0; +} + +static int +test_interrupt_deinit(void) +{ + return 0; +} + +static int +test_interrupt_trigger_interrupt(void) +{ + return 0; +} + +static int +test_interrupt_handle_compare(struct rte_intr_handle *intr_handle_l, + struct rte_intr_handle *intr_handle_r) +{ + (void)intr_handle_l; + (void)intr_handle_r; + + return 0; +} +#endif /* RTE_EXEC_ENV_LINUXAPP */ + +static void +test_interrupt_callback(struct rte_intr_handle *intr_handle, void *arg) +{ + if (test_interrupt_handle_sanity_check(intr_handle) < 0) { + printf("null or invalid intr_handle for %s\n", __FUNCTION__); + return; + } + + if (rte_intr_callback_unregister(intr_handle, + test_interrupt_callback, arg) <= 0) { + printf("fail to unregister callback\n"); + return; + } + + if (test_interrupt_handle_compare(intr_handle, + &(intr_handles[TEST_INTERRUPT_HANDLE_VALID])) == 0) { + flag = 1; + } +} + +static void +test_interrupt_callback_1(struct rte_intr_handle *intr_handle, void *arg) +{ + if (test_interrupt_handle_sanity_check(intr_handle) < 0) { + printf("null or invalid intr_handle for %s\n", __FUNCTION__); + return; + } + if (rte_intr_callback_unregister(intr_handle, + test_interrupt_callback_1, arg) <= 0) { + printf("fail to unregister callback\n"); + return; + } +} + +static int +test_interrupt_enable(void) +{ + struct rte_intr_handle test_intr_handle; + + /* check with null intr_handle */ + if (rte_intr_enable(NULL) == 0) { + printf("unexpectedly enable null 
intr_handle successfully\n"); + return -1; + } + + /* check with invalid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID]; + if (rte_intr_enable(&test_intr_handle) == 0) { + printf("unexpectedly enable invalid intr_handle " + "successfully\n"); + return -1; + } + + /* check with valid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID]; + if (rte_intr_enable(&test_intr_handle) == 0) { + printf("unexpectedly enable a specific intr_handle " + "successfully\n"); + return -1; + } + + /* check with specific valid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1]; + if (rte_intr_enable(&test_intr_handle) == 0) { + printf("unexpectedly enable a specific intr_handle " + "successfully\n"); + return -1; + } + + return 0; +} + +static int +test_interrupt_disable(void) +{ + struct rte_intr_handle test_intr_handle; + + /* check with null intr_handle */ + if (rte_intr_disable(NULL) == 0) { + printf("unexpectedly disable null intr_handle " + "successfully\n"); + return -1; + } + + /* check with invalid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID]; + if (rte_intr_disable(&test_intr_handle) == 0) { + printf("unexpectedly disable invalid intr_handle " + "successfully\n"); + return -1; + } + + /* check with valid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID]; + if (rte_intr_disable(&test_intr_handle) == 0) { + printf("unexpectedly disable a specific intr_handle " + "successfully\n"); + return -1; + } + + /* check with specific valid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1]; + if (rte_intr_disable(&test_intr_handle) == 0) { + printf("unexpectedly disable a specific intr_handle " + "successfully\n"); + return -1; + } + + return 0; +} + +int +test_interrupt(void) +{ + int count = 0, ret = -1; + struct rte_intr_handle test_intr_handle; + + if (test_interrupt_init() < 0) { + printf("fail to do test init\n"); + return -1; + } + + printf("check if callback registered can be called\n"); + + /* check if callback registered can be called */ + flag = 0; + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID]; + if (rte_intr_callback_register(&test_intr_handle, + test_interrupt_callback, NULL) < 0) { + printf("fail to register callback\n"); + goto out; + } + /* trigger an interrupt and then check if the callback can be called */ + if (test_interrupt_trigger_interrupt() < 0) { + printf("fail to trigger an interrupt\n"); + goto out; + } + /* check flag in 3 seconds */ + while (flag == 0 && count++ < 3) + rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL); + if (flag == 0) { + printf("registered callback has not been called\n"); + goto out; + } + rte_delay_ms(1000); + + printf("start register/unregister test\n"); + + /* check if it will fail to register cb with intr_handle = NULL */ + if (rte_intr_callback_register(NULL, test_interrupt_callback, + NULL) == 0) { + printf("unexpectedly register successfully with null " + "intr_handle\n"); + goto out; + } + + /* check if it will fail to register cb with invalid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID]; + if (rte_intr_callback_register(&test_intr_handle, + test_interrupt_callback, NULL) == 0) { + printf("unexpectedly register successfully with invalid " + "intr_handle\n"); + goto out; + } + + /* check if it will fail to register without callback */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID]; + if 
(rte_intr_callback_register(&test_intr_handle, NULL, NULL) == 0) { + printf("unexpectedly register successfully with " + "null callback\n"); + goto out; + } + + /* check if it will fail to unregister cb with intr_handle = NULL */ + if (rte_intr_callback_unregister(NULL, + test_interrupt_callback, NULL) > 0) { + printf("unexpectedly unregister successfully with " + "null intr_handle\n"); + goto out; + } + + /* check if it will fail to unregister cb with invalid intr_handle */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID]; + if (rte_intr_callback_unregister(&test_intr_handle, + test_interrupt_callback, NULL) > 0) { + printf("unexpectedly unregister successfully with " + "invalid intr_handle\n"); + goto out; + } + + /* check if it is ok to register the same intr_handle twice */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID]; + if (rte_intr_callback_register(&test_intr_handle, + test_interrupt_callback, NULL) < 0) { + printf("it fails to register test_interrupt_callback\n"); + goto out; + } + if (rte_intr_callback_register(&test_intr_handle, + test_interrupt_callback_1, NULL) < 0) { + printf("it fails to register test_interrupt_callback_1\n"); + goto out; + } + /* check if it will fail to unregister with invalid parameter */ + if (rte_intr_callback_unregister(&test_intr_handle, + test_interrupt_callback, (void *)0xff) != 0) { + printf("unexpectedly unregisters successfully with invalid arg\n"); + goto out; + } + if (rte_intr_callback_unregister(&test_intr_handle, + test_interrupt_callback, NULL) <= 0) { + printf("it fails to unregister test_interrupt_callback\n"); + goto out; + } + if (rte_intr_callback_unregister(&test_intr_handle, + test_interrupt_callback_1, (void *)-1) <= 0) { + printf("it fails to unregister test_interrupt_callback_1 " + "for all\n"); + goto out; + } + rte_delay_ms(1000); + + printf("start interrupt enable/disable test\n"); + + /* check interrupt enable/disable functions */ + if (test_interrupt_enable() < 0) + goto out; + rte_delay_ms(1000); + + if (test_interrupt_disable() < 0) + goto out; + rte_delay_ms(1000); + + ret = 0; + +out: + /* clear registered callbacks */ + test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID]; + rte_intr_callback_unregister(&test_intr_handle, + test_interrupt_callback, (void *)-1); + rte_intr_callback_unregister(&test_intr_handle, + test_interrupt_callback_1, (void *)-1); + + rte_delay_ms(2000); + /* deinit */ + test_interrupt_deinit(); + + return ret; +} + diff --git a/app/test/test_logs.c b/app/test/test_logs.c new file mode 100644 index 0000000000..c5aac9c9d2 --- /dev/null +++ b/app/test/test_logs.c @@ -0,0 +1,96 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" + +#define RTE_LOGTYPE_TESTAPP1 RTE_LOGTYPE_USER1 +#define RTE_LOGTYPE_TESTAPP2 RTE_LOGTYPE_USER2 + +/* + * Logs + * ==== + * + * - Enable log types. + * - Set log level. + * - Send logs with different types and levels, some should not be displayed. + */ + +int +test_logs(void) +{ + /* enable these logs type */ + rte_set_log_type(RTE_LOGTYPE_TESTAPP1, 1); + rte_set_log_type(RTE_LOGTYPE_TESTAPP2, 1); + + /* log in debug level */ + rte_set_log_level(RTE_LOG_DEBUG); + RTE_LOG(DEBUG, TESTAPP1, "this is a debug level message\n"); + RTE_LOG(INFO, TESTAPP1, "this is a info level message\n"); + RTE_LOG(WARNING, TESTAPP1, "this is a warning level message\n"); + + /* log in info level */ + rte_set_log_level(RTE_LOG_INFO); + RTE_LOG(DEBUG, TESTAPP2, "debug level message (not displayed)\n"); + RTE_LOG(INFO, TESTAPP2, "this is a info level message\n"); + RTE_LOG(WARNING, TESTAPP2, "this is a warning level message\n"); + + /* disable one log type */ + rte_set_log_type(RTE_LOGTYPE_TESTAPP2, 0); + + /* log in debug level */ + rte_set_log_level(RTE_LOG_DEBUG); + RTE_LOG(DEBUG, TESTAPP1, "this is a debug level message\n"); + RTE_LOG(DEBUG, TESTAPP2, "debug level message (not displayed)\n"); + + rte_log_dump_history(); + + return 0; +} diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c new file mode 100644 index 0000000000..3a9400feab --- /dev/null +++ b/app/test/test_lpm.c @@ -0,0 +1,1365 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#ifdef RTE_LIBRTE_LPM + +#include "rte_lpm.h" +#include "test_lpm_routes.h" + +#include "test.h" + +#define ITERATIONS (1 << 20) +#define BATCH_SIZE (1 << 13) + +#define TEST_LPM_ASSERT(cond) do { \ + if (!(cond)) { \ + printf("Error at line %d: \n", __LINE__); \ + return -1; \ + } \ +} while(0) + + + +typedef int32_t (* rte_lpm_test)(void); + +static int32_t test0(void); +static int32_t test1(void); +static int32_t test2(void); +static int32_t test3(void); +static int32_t test4(void); +static int32_t test5(void); +static int32_t test6(void); +static int32_t test7(void); +static int32_t test8(void); +static int32_t test9(void); +static int32_t test10(void); +static int32_t test11(void); +static int32_t test12(void); +static int32_t test13(void); +static int32_t test14(void); +static int32_t test15(void); +static int32_t test16(void); +static int32_t test17(void); +static int32_t test18(void); + +rte_lpm_test tests[] = { +/* Test Cases */ + test0, + test1, + test2, + test3, + test4, + test5, + test6, + test7, + test8, + test9, + test10, + test11, + test12, + test13, + test14, + test15, + test16, + test17, + test18 +}; + +#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0])) +#define MAX_DEPTH 32 +#define MAX_RULES 256 +#define PASS 0 + +/* + * TEST 0 + * + * Check that rte_lpm_create fails gracefully for incorrect user input + * arguments + */ +int32_t +test0(void) +{ + struct rte_lpm *lpm = NULL; + + /* rte_lpm_create: lpm name == NULL */ + lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm == NULL); + + /* rte_lpm_create: max_rules = 0 */ + /* Note: __func__ inserts the function name, in this case "test0". */ + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm == NULL); + + /* rte_lpm_create: mem_location is not RTE_LPM_HEAP or not MEMZONE */ + /* Note: __func__ inserts the function name, in this case "test0". */ + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 2); + TEST_LPM_ASSERT(lpm == NULL); + + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, -1); + TEST_LPM_ASSERT(lpm == NULL); + + /* socket_id < -1 is invalid */ + lpm = rte_lpm_create(__func__, -2, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm == NULL); + + return PASS; +} + +/* TEST 1 + * + * Create lpm table then delete lpm table 100 times + * Use a slightly different rules size each time + * */ +int32_t +test1(void) +{ + struct rte_lpm *lpm = NULL; + int32_t i; + + /* rte_lpm_free: Free NULL */ + for (i = 0; i < 100; i++) { + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i, + RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + rte_lpm_free(lpm); + } + + /* Can not test free so return success */ + return PASS; +} + +/* TEST 2 + * + * Call rte_lpm_free for NULL pointer user input. 
Note: free has no return and + * therefore it is impossible to check for failure but this test is added to + * increase function coverage metrics and to validate that freeing null does + * not crash. + */ +int32_t +test2(void) +{ + struct rte_lpm *lpm = NULL; + + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + rte_lpm_free(lpm); + rte_lpm_free(NULL); + return PASS; +} + +/* TEST 3 + * + * Check that rte_lpm_add fails gracefully for incorrect user input arguments + */ +int32_t +test3(void) +{ + struct rte_lpm *lpm = NULL; + uint32_t ip = IPv4(0, 0, 0, 0); + uint8_t depth = 24, next_hop = 100; + int32_t status = 0; + + /* rte_lpm_add: lpm == NULL */ + status = rte_lpm_add(NULL, ip, depth, next_hop); + TEST_LPM_ASSERT(status < 0); + + /*Create vaild lpm to use in rest of test. */ + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + /* rte_lpm_add: depth < 1 */ + status = rte_lpm_add(lpm, ip, 0, next_hop); + TEST_LPM_ASSERT(status < 0); + + /* rte_lpm_add: depth > MAX_DEPTH */ + status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop); + TEST_LPM_ASSERT(status < 0); + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST 4 + * + * Check that rte_lpm_delete fails gracefully for incorrect user input + * arguments + */ +int32_t +test4(void) +{ + struct rte_lpm *lpm = NULL; + uint32_t ip = IPv4(0, 0, 0, 0); + uint8_t depth = 24; + int32_t status = 0; + + /* rte_lpm_delete: lpm == NULL */ + status = rte_lpm_delete(NULL, ip, depth); + TEST_LPM_ASSERT(status < 0); + + /*Create vaild lpm to use in rest of test. */ + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + /* rte_lpm_delete: depth < 1 */ + status = rte_lpm_delete(lpm, ip, 0); + TEST_LPM_ASSERT(status < 0); + + /* rte_lpm_delete: depth > MAX_DEPTH */ + status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1)); + TEST_LPM_ASSERT(status < 0); + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST 5 + * + * Check that rte_lpm_lookup fails gracefully for incorrect user input + * arguments + */ +int32_t +test5(void) +{ +#if defined(RTE_LIBRTE_LPM_DEBUG) + struct rte_lpm *lpm = NULL; + uint32_t ip = IPv4(0, 0, 0, 0); + uint8_t next_hop_return = 0; + int32_t status = 0; + + /* rte_lpm_lookup: lpm == NULL */ + status = rte_lpm_lookup(NULL, ip, &next_hop_return); + TEST_LPM_ASSERT(status < 0); + + /*Create vaild lpm to use in rest of test. 
*/ + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + /* rte_lpm_lookup: depth < 1 */ + status = rte_lpm_lookup(lpm, ip, NULL); + TEST_LPM_ASSERT(status < 0); + + rte_lpm_free(lpm); +#endif + return PASS; +} + + + +/* TEST 6 + * + * Call add, lookup and delete for a single rule with depth <= 24 + */ +int32_t +test6(void) +{ + struct rte_lpm *lpm = NULL; + uint32_t ip = IPv4(0, 0, 0, 0); + uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0; + int32_t status = 0; + + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST 7 + * + * Call add, lookup and delete for a single rule with depth > 24 + */ + +int32_t +test7(void) +{ + struct rte_lpm *lpm = NULL; + uint32_t ip = IPv4(0, 0, 0, 0); + uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0; + int32_t status = 0; + + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST 8 + * + * Use rte_lpm_add to add rules which effect only the second half of the lpm + * table. Use all possible depths ranging from 1..32. Set the next hop = to the + * depth. Check lookup hit for on every add and check for lookup miss on the + * first half of the lpm table after each add. Finally delete all rules going + * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each + * delete. The lookup should return the next_hop_add value related to the + * previous depth value (i.e. depth -1). + */ +int32_t +test8(void) +{ + struct rte_lpm *lpm = NULL; + uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0); + uint8_t depth, next_hop_add, next_hop_return; + int32_t status = 0; + + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + /* Loop with rte_lpm_add. */ + for (depth = 1; depth <= 32; depth++) { + /* Let the next_hop_add value = depth. Just for change. */ + next_hop_add = depth; + + status = rte_lpm_add(lpm, ip2, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + /* Check IP in first half of tbl24 which should be empty. */ + status = rte_lpm_lookup(lpm, ip1, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + status = rte_lpm_lookup(lpm, ip2, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && + (next_hop_return == next_hop_add)); + } + + /* Loop with rte_lpm_delete. 
*/ + for (depth = 32; depth >= 1; depth--) { + next_hop_add = (uint8_t) (depth - 1); + + status = rte_lpm_delete(lpm, ip2, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip2, &next_hop_return); + + if (depth != 1) { + TEST_LPM_ASSERT((status == 0) && + (next_hop_return == next_hop_add)); + } + else { + TEST_LPM_ASSERT(status == -ENOENT); + } + + status = rte_lpm_lookup(lpm, ip1, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + } + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST 9 + * + * - Add & lookup to hit invalid TBL24 entry + * - Add & lookup to hit valid TBL24 entry not extended + * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry + * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry + * + */ +int32_t +test9(void) +{ + struct rte_lpm *lpm = NULL; + uint32_t ip, ip_1, ip_2; + uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1, + next_hop_add_2, next_hop_return; + int32_t status = 0; + + /* Add & lookup to hit invalid TBL24 entry */ + ip = IPv4(128, 0, 0, 0); + depth = 24; + next_hop_add = 100; + + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_delete_all(lpm); + + /* Add & lookup to hit valid TBL24 entry not extended */ + ip = IPv4(128, 0, 0, 0); + depth = 23; + next_hop_add = 100; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + depth = 24; + next_hop_add = 101; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + depth = 24; + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + depth = 23; + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_delete_all(lpm); + + /* Add & lookup to hit valid extended TBL24 entry with invalid TBL8 + * entry */ + ip = IPv4(128, 0, 0, 0); + depth = 32; + next_hop_add = 100; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + ip = IPv4(128, 0, 0, 5); + depth = 32; + next_hop_add = 101; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + ip = IPv4(128, 0, 0, 0); + depth = 32; + next_hop_add = 100; + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + status = 
rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_delete_all(lpm); + + /* Add & lookup to hit valid extended TBL24 entry with valid TBL8 + * entry */ + ip_1 = IPv4(128, 0, 0, 0); + depth_1 = 25; + next_hop_add_1 = 101; + + ip_2 = IPv4(128, 0, 0, 5); + depth_2 = 32; + next_hop_add_2 = 102; + + next_hop_return = 0; + + status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip_1, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1)); + + status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip_2, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2)); + + status = rte_lpm_delete(lpm, ip_2, depth_2); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip_2, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1)); + + status = rte_lpm_delete(lpm, ip_1, depth_1); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip_1, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_free(lpm); + + return PASS; +} + + +/* TEST 10 + * + * - Add rule that covers a TBL24 range previously invalid & lookup (& delete & + * lookup) + * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup) + * - Add rule that extends a TBL24 valid entry & lookup for both rules (& + * delete & lookup) + * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup) + * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup) + * - Delete a rule that is not present in the TBL24 & lookup + * - Delete a rule that is not present in the TBL8 & lookup + * + */ +int32_t +test10(void) +{ + + struct rte_lpm *lpm = NULL; + uint32_t ip; + uint8_t depth, next_hop_add, next_hop_return; + int32_t status = 0; + + /* Add rule that covers a TBL24 range previously invalid & lookup + * (& delete & lookup) */ + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + ip = IPv4(128, 0, 0, 0); + depth = 16; + next_hop_add = 100; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_delete_all(lpm); + + ip = IPv4(128, 0, 0, 0); + depth = 25; + next_hop_add = 100; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + rte_lpm_delete_all(lpm); + + /* Add rule that extends a TBL24 valid entry & lookup for both rules + * (& delete & lookup) */ + + ip = IPv4(128, 0, 0, 0); + depth = 24; + next_hop_add = 100; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + ip = IPv4(128, 0, 0, 10); + depth = 32; + next_hop_add = 101; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, 
&next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + ip = IPv4(128, 0, 0, 0); + next_hop_add = 100; + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + ip = IPv4(128, 0, 0, 0); + depth = 24; + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + ip = IPv4(128, 0, 0, 10); + depth = 32; + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_delete_all(lpm); + + /* Add rule that updates the next hop in TBL24 & lookup + * (& delete & lookup) */ + + ip = IPv4(128, 0, 0, 0); + depth = 24; + next_hop_add = 100; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + next_hop_add = 101; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_delete_all(lpm); + + /* Add rule that updates the next hop in TBL8 & lookup + * (& delete & lookup) */ + + ip = IPv4(128, 0, 0, 0); + depth = 32; + next_hop_add = 100; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + next_hop_add = 101; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_delete_all(lpm); + + /* Delete a rule that is not present in the TBL24 & lookup */ + + ip = IPv4(128, 0, 0, 0); + depth = 24; + next_hop_add = 100; + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status < 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_delete_all(lpm); + + /* Delete a rule that is not present in the TBL8 & lookup */ + + ip = IPv4(128, 0, 0, 0); + depth = 32; + next_hop_add = 100; + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status < 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST 11 + * + * Add two rules, lookup to hit the more specific one, lookup to hit the less + * specific one delete the less specific rule and lookup previous values again; + * add a more specific rule than the existing rule, lookup again + * + * */ +int32_t +test11(void) +{ + + struct rte_lpm *lpm = NULL; + uint32_t ip; + uint8_t depth, next_hop_add, next_hop_return; + int32_t status = 0; + + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + ip = IPv4(128, 0, 0, 0); + depth = 
24; + next_hop_add = 100; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + ip = IPv4(128, 0, 0, 10); + depth = 32; + next_hop_add = 101; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + ip = IPv4(128, 0, 0, 0); + next_hop_add = 100; + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add)); + + ip = IPv4(128, 0, 0, 0); + depth = 24; + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + ip = IPv4(128, 0, 0, 10); + depth = 32; + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST 12 + * + * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete, + * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension + * and contraction. + * + * */ + +int32_t +test12(void) +{ + struct rte_lpm *lpm = NULL; + uint32_t ip, i; + uint8_t depth, next_hop_add, next_hop_return; + int32_t status = 0; + + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + ip = IPv4(128, 0, 0, 0); + depth = 32; + next_hop_add = 100; + + for (i = 0; i < 1000; i++) { + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && + (next_hop_return == next_hop_add)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + } + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST 13 + * + * Add a rule to tbl24, lookup (hit), then add a rule that will extend this + * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension, + * lookup (miss) and repeat for loop of 1000 times. This will check tbl8 + * extension and contraction. 
+ * + * */ + +int32_t +test13(void) +{ + struct rte_lpm *lpm = NULL; + uint32_t ip, i; + uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return; + int32_t status = 0; + + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + ip = IPv4(128, 0, 0, 0); + depth = 24; + next_hop_add_1 = 100; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add_1); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1)); + + depth = 32; + next_hop_add_2 = 101; + + for (i = 0; i < 1000; i++) { + status = rte_lpm_add(lpm, ip, depth, next_hop_add_2); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && + (next_hop_return == next_hop_add_2)); + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && + (next_hop_return == next_hop_add_1)); + } + + depth = 24; + + status = rte_lpm_delete(lpm, ip, depth); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT(status == -ENOENT); + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST 14 + * + * Fore TBL8 extension exhaustion. Add 256 rules that require a tbl8 extension. + * No more tbl8 extensions will be allowed. Now add one more rule that required + * a tbl8 extension and get fail. + * */ +int32_t +test14(void) +{ + + /* We only use depth = 32 in the loop below so we must make sure + * that we have enough storage for all rules at that depth*/ + + struct rte_lpm *lpm = NULL; + uint32_t ip; + uint8_t depth, next_hop_add, next_hop_return; + int32_t status = 0; + + /* Add enough space for 256 rules for every depth */ + lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + ip = IPv4(0, 0, 0, 0); + depth = 32; + next_hop_add = 100; + + /* Add 256 rules that require a tbl8 extension */ + for (ip = 0; ip <= IPv4(0, 0, 255, 0); ip += 256) { + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status == 0); + + status = rte_lpm_lookup(lpm, ip, &next_hop_return); + TEST_LPM_ASSERT((status == 0) && + (next_hop_return == next_hop_add)); + } + + /* All tbl8 extensions have been used above. 
Try to add one more and + * we get a fail */ + ip = IPv4(1, 0, 0, 0); + depth = 32; + + status = rte_lpm_add(lpm, ip, depth, next_hop_add); + TEST_LPM_ASSERT(status < 0); + + rte_lpm_free(lpm); + + return PASS; +} + +/* TEST test15 + * + * Lookup performance test using Mae West Routing Table + */ +static inline uint32_t +depth_to_mask(uint8_t depth) { + return (int)0x80000000 >> (depth - 1); +} + +static uint32_t +rule_table_check_for_duplicates(const struct route_rule *table, uint32_t n){ + unsigned i, j, count; + + count = 0; + for (i = 0; i < (n - 1); i++) { + uint8_t depth1 = table[i].depth; + uint32_t ip1_masked = table[i].ip & depth_to_mask(depth1); + + for (j = (i + 1); j tbl24[i].valid) + lpm_used_entries++; + + if (i % 32 == 0){ + if (count < lpm_used_entries) { + cache_line_counter++; + count = lpm_used_entries; + } + } + } + + printf("Number of table 24 entries = %u\n", + (unsigned) RTE_LPM_TBL24_NUM_ENTRIES); + printf("Used table 24 entries = %u\n", + (unsigned) lpm_used_entries); + printf("Percentage of table 24 entries used = %u\n", + (unsigned) div64((lpm_used_entries * 100) , + RTE_LPM_TBL24_NUM_ENTRIES)); + printf("64 byte Cache entries used = %u \n", + (unsigned) cache_line_counter); + printf("Cache Required = %u bytes\n\n", + (unsigned) cache_line_counter * 64); + + printf("Average LPM Add: %u cycles\n", avg_ticks); + + /* Lookup */ + + /* Choose random seed. */ + rte_srand(0); + total_time = 0; + status = 0; + for (i = 0; i < (ITERATIONS / BATCH_SIZE); i ++) { + static uint32_t ip_batch[BATCH_SIZE]; + uint64_t begin_batch, end_batch; + + /* Generate a batch of random numbers */ + for (j = 0; j < BATCH_SIZE; j ++) { + ip_batch[j] = rte_rand(); + } + + /* Lookup per batch */ + begin_batch = rte_rdtsc(); + + for (j = 0; j < BATCH_SIZE; j ++) { + status += rte_lpm_lookup(lpm, ip_batch[j], + &next_hop_return); + } + + end_batch = rte_rdtsc(); + printf("status = %d\r", next_hop_return); + TEST_LPM_ASSERT(status < 1); + + /* Accumulate batch time */ + total_time += (end_batch - begin_batch); + + TEST_LPM_ASSERT((status < -ENOENT) || + (next_hop_return == next_hop_add)); + } + + avg_ticks = (uint32_t) div64(total_time, ITERATIONS); + printf("Average LPM Lookup: %u cycles\n", avg_ticks); + + /* Delete */ + status = 0; + begin = rte_rdtsc(); + + for (i = 0; i < NUM_ROUTE_ENTRIES; i++) { + /* rte_lpm_delete(lpm, ip, depth) */ + status += rte_lpm_delete(lpm, mae_west_tbl[i].ip, + mae_west_tbl[i].depth); + } + + end = rte_rdtsc(); + + TEST_LPM_ASSERT(status == 0); + + avg_ticks = (uint32_t) div64((end - begin), NUM_ROUTE_ENTRIES); + + printf("Average LPM Delete: %u cycles\n", avg_ticks); + + rte_lpm_delete_all(lpm); + rte_lpm_free(lpm); + + return PASS; +} + + + +/* + * Sequence of operations for find existing fbk hash table + * + * - create table + * - find existing table: hit + * - find non-existing table: miss + * + */ +int32_t test16(void) +{ + struct rte_lpm *lpm = NULL, *result = NULL; + + /* Create lpm */ + lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP); + TEST_LPM_ASSERT(lpm != NULL); + + /* Try to find existing lpm */ + result = rte_lpm_find_existing("lpm_find_existing"); + TEST_LPM_ASSERT(result == lpm); + + /* Try to find non-existing lpm */ + result = rte_lpm_find_existing("lpm_find_non_existing"); + TEST_LPM_ASSERT(result == NULL); + + /* Cleanup. 
+	rte_lpm_delete_all(lpm);
+	rte_lpm_free(lpm);
+
+	return PASS;
+}
+
+/*
+ * Test the failure condition of overloading the tbl8 so that no more will fit.
+ * Check that we get an error return value in that case.
+ */
+static int32_t
+test17(void)
+{
+	uint32_t ip;
+	struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
+			256 * 32, RTE_LPM_HEAP);
+
+	printf("Testing filling tbl8's\n");
+
+	/* ip loops through all possibilities for top 24 bits of address */
+	for (ip = 0; ip < 0xFFFFFF; ip++){
+		/* add an entry within a different tbl8 each time, since
+		 * depth >24 and the top 24 bits are different */
+		if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
+			break;
+	}
+
+	if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
+		printf("Error, unexpected failure with filling tbl8 groups\n");
+		printf("Failed after %u additions, expected after %u\n",
+				(unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
+	}
+
+	rte_lpm_free(lpm);
+	return 0;
+}
+
+/*
+ * Test 18
+ * Test for overwriting of tbl8:
+ * - add rule /32 and lookup
+ * - add new rule /24 and lookup
+ * - add third rule /25 and lookup
+ * - lookup /32 and /24 rule to ensure the table has not been overwritten.
+ */
+int32_t
+test18(void)
+{
+	struct rte_lpm *lpm = NULL;
+	const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
+	const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
+	const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
+	const uint8_t d_ip_10_32 = 32,
+			d_ip_10_24 = 24,
+			d_ip_20_25 = 25;
+	const uint8_t next_hop_ip_10_32 = 100,
+			next_hop_ip_10_24 = 105,
+			next_hop_ip_20_25 = 111;
+	uint8_t next_hop_return = 0;
+	int32_t status = 0;
+
+	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+	TEST_LPM_ASSERT(lpm != NULL);
+
+	status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32, next_hop_ip_10_32);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
+
+	status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24, next_hop_ip_10_24);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
+
+	status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25, next_hop_ip_20_25);
+	TEST_LPM_ASSERT(status == 0);
+
+	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
+
+	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
+
+	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
+	TEST_LPM_ASSERT(status == 0);
+	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
+
+	rte_lpm_free(lpm);
+
+	printf("%s PASSED\n", __func__);
+	return PASS;
+}
+
+
+/*
+ * Do all unit and performance tests.
+ */
+
+int
+test_lpm(void)
+{
+	unsigned test_num;
+	int status, global_status;
+
+	printf("Running LPM tests...\n"
+			"Total number of tests = %u\n", (unsigned) NUM_LPM_TESTS);
+
+	global_status = 0;
+
+	for (test_num = 0; test_num < NUM_LPM_TESTS; test_num++) {
+
+		status = tests[test_num]();
+
+		printf("LPM Test %u: %s\n", test_num,
+				(status < 0) ?
"FAIL" : "PASS"); + + if (status < 0) { + global_status = status; + } + } + + return global_status; +} + +#else + +int +test_lpm(void) +{ + printf("The LPM library is not included in this build\n"); + return 0; +} + +#endif diff --git a/app/test/test_lpm_routes.h b/app/test/test_lpm_routes.h new file mode 100644 index 0000000000..85e885ab9d --- /dev/null +++ b/app/test/test_lpm_routes.h @@ -0,0 +1,28947 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _TEST_LPM_ROUTES_H_ +#define _TEST_LPM_ROUTES_H_ + +#include + +struct route_rule { + uint32_t ip; + uint8_t depth; +}; + +static const struct route_rule mae_west_tbl[] = +{ + { IPv4(6,1,0,0),16 }, + { IPv4(6,2,0,0),22 }, + { IPv4(6,3,0,0),18 }, + { IPv4(6,4,0,0),16 }, + { IPv4(6,5,0,0),19 }, + { IPv4(6,8,0,0),20 }, + { IPv4(6,9,0,0),20 }, + { IPv4(6,10,0,0),15 }, + { IPv4(6,14,0,0),15 }, + { IPv4(6,133,0,0),21 }, + { IPv4(6,151,0,0),16 }, + { IPv4(6,152,0,0),16 }, + { IPv4(9,141,128,0),24 }, + { IPv4(12,0,0,0),8 }, + { IPv4(12,0,252,0),23 }, + { IPv4(12,1,83,0),24 }, + { IPv4(12,1,245,0),24 }, + { IPv4(12,1,248,0),24 }, + { IPv4(12,2,6,0),24 }, + { IPv4(12,2,7,0),24 }, + { IPv4(12,2,41,0),24 }, + { IPv4(12,2,88,0),22 }, + { IPv4(12,2,94,0),23 }, + { IPv4(12,2,97,0),24 }, + { IPv4(12,2,99,0),24 }, + { IPv4(12,2,109,0),24 }, + { IPv4(12,2,110,0),24 }, + { IPv4(12,2,142,0),24 }, + { IPv4(12,2,169,0),24 }, + { IPv4(12,2,216,0),23 }, + { IPv4(12,2,220,0),22 }, + { IPv4(12,2,246,0),24 }, + { IPv4(12,3,33,0),24 }, + { IPv4(12,3,59,0),24 }, + { IPv4(12,3,65,0),24 }, + { IPv4(12,3,80,0),22 }, + { IPv4(12,3,119,0),24 }, + { IPv4(12,3,217,0),24 }, + { IPv4(12,4,5,0),24 }, + { IPv4(12,4,114,0),24 }, + { IPv4(12,4,119,0),24 }, + { IPv4(12,4,126,0),23 }, + { IPv4(12,4,196,0),22 }, + { IPv4(12,4,228,0),24 }, + { IPv4(12,5,39,0),24 }, + { IPv4(12,5,48,0),21 }, + { IPv4(12,5,136,0),24 }, + { IPv4(12,5,144,0),24 }, + { IPv4(12,5,164,0),24 }, + { IPv4(12,5,165,0),24 }, + { IPv4(12,6,97,0),24 }, + { IPv4(12,6,102,0),24 }, + { IPv4(12,6,103,0),24 }, + { IPv4(12,6,108,0),24 }, + { IPv4(12,6,109,0),24 }, + { IPv4(12,6,110,0),24 }, + { IPv4(12,6,111,0),24 }, + { IPv4(12,6,121,0),24 }, + { IPv4(12,6,124,0),24 }, + { IPv4(12,6,125,0),24 }, + { IPv4(12,6,206,0),24 }, + { IPv4(12,6,227,0),24 }, + { IPv4(12,7,5,0),24 }, + { IPv4(12,7,133,0),24 }, + { IPv4(12,7,216,0),21 }, + { IPv4(12,8,9,0),24 }, + { IPv4(12,8,13,0),24 }, + { IPv4(12,8,184,0),24 }, + { IPv4(12,8,188,0),24 }, + { IPv4(12,8,189,0),24 }, + { IPv4(12,9,136,0),24 }, + { IPv4(12,9,138,0),24 }, + { IPv4(12,9,139,0),24 }, + { IPv4(12,10,150,0),24 }, + { IPv4(12,10,152,0),21 }, + { IPv4(12,11,130,0),24 }, + { IPv4(12,11,131,0),24 }, + { IPv4(12,11,138,0),24 }, + { IPv4(12,11,162,0),24 }, + { IPv4(12,13,56,0),24 }, + { IPv4(12,13,57,0),24 }, + { IPv4(12,13,58,0),24 }, + { IPv4(12,13,59,0),24 }, + { IPv4(12,13,74,0),24 }, + { IPv4(12,13,82,0),23 }, + { IPv4(12,13,84,0),24 }, + { IPv4(12,13,224,0),19 }, + { IPv4(12,13,224,0),21 }, + { IPv4(12,13,236,0),22 }, + { IPv4(12,13,240,0),22 }, + { IPv4(12,13,244,0),23 }, + { IPv4(12,13,246,0),23 }, + { IPv4(12,13,248,0),22 }, + { IPv4(12,14,190,0),23 }, + { IPv4(12,14,214,0),24 }, + { IPv4(12,14,215,0),24 }, + { IPv4(12,14,232,0),23 }, + { IPv4(12,14,237,0),24 }, + { IPv4(12,14,238,0),23 }, + { IPv4(12,15,28,0),24 }, + { IPv4(12,15,46,0),23 }, + { IPv4(12,16,40,0),24 }, + { IPv4(12,16,41,0),24 }, + { IPv4(12,16,76,0),23 }, + { IPv4(12,16,132,0),24 }, + { IPv4(12,16,133,0),24 }, + { IPv4(12,16,134,0),24 }, + { IPv4(12,16,135,0),24 }, + { IPv4(12,16,160,0),24 }, + { IPv4(12,16,161,0),24 }, + { IPv4(12,16,162,0),24 }, + { IPv4(12,16,163,0),24 }, + { IPv4(12,16,168,0),24 }, + { IPv4(12,16,188,0),24 }, + { IPv4(12,16,189,0),24 }, + { IPv4(12,16,190,0),24 }, + { IPv4(12,16,191,0),24 }, + { IPv4(12,17,20,0),24 }, + { IPv4(12,18,36,0),24 }, + { IPv4(12,18,36,0),22 }, + { IPv4(12,18,90,0),23 }, + { IPv4(12,18,96,0),22 }, + { IPv4(12,18,110,0),23 }, + { IPv4(12,18,120,0),24 }, + { 
IPv4(12,18,155,0),24 }, + { IPv4(12,18,170,0),24 }, + { IPv4(12,18,171,0),24 }, + { IPv4(12,18,177,0),24 }, + { IPv4(12,18,216,0),24 }, + { IPv4(12,18,217,0),24 }, + { IPv4(12,18,240,0),22 }, + { IPv4(12,18,244,0),22 }, + { IPv4(12,19,136,0),23 }, + { IPv4(12,19,138,0),24 }, + { IPv4(12,19,208,0),24 }, + { IPv4(12,19,211,0),24 }, + { IPv4(12,20,16,0),22 }, + { IPv4(12,20,55,0),24 }, + { IPv4(12,20,92,0),24 }, + { IPv4(12,20,200,0),22 }, + { IPv4(12,20,229,0),24 }, + { IPv4(12,21,14,0),23 }, + { IPv4(12,21,85,0),24 }, + { IPv4(12,21,202,0),24 }, + { IPv4(12,21,208,0),23 }, + { IPv4(12,21,210,0),23 }, + { IPv4(12,21,212,0),23 }, + { IPv4(12,21,216,0),24 }, + { IPv4(12,21,217,0),24 }, + { IPv4(12,21,218,0),24 }, + { IPv4(12,21,219,0),24 }, + { IPv4(12,21,222,0),24 }, + { IPv4(12,21,223,0),24 }, + { IPv4(12,22,96,0),24 }, + { IPv4(12,23,26,0),23 }, + { IPv4(12,23,66,0),23 }, + { IPv4(12,23,70,0),23 }, + { IPv4(12,23,72,0),23 }, + { IPv4(12,23,78,0),24 }, + { IPv4(12,23,108,0),23 }, + { IPv4(12,23,136,0),22 }, + { IPv4(12,23,189,0),24 }, + { IPv4(12,23,194,0),24 }, + { IPv4(12,23,251,0),24 }, + { IPv4(12,24,96,0),24 }, + { IPv4(12,24,112,0),22 }, + { IPv4(12,24,180,0),23 }, + { IPv4(12,24,252,0),22 }, + { IPv4(12,25,49,0),24 }, + { IPv4(12,25,98,0),24 }, + { IPv4(12,25,105,0),24 }, + { IPv4(12,25,136,0),21 }, + { IPv4(12,25,196,0),22 }, + { IPv4(12,25,212,0),23 }, + { IPv4(12,25,230,0),24 }, + { IPv4(12,25,232,0),21 }, + { IPv4(12,25,241,0),24 }, + { IPv4(12,26,7,0),24 }, + { IPv4(12,26,25,0),24 }, + { IPv4(12,26,53,0),24 }, + { IPv4(12,26,84,0),24 }, + { IPv4(12,26,86,0),24 }, + { IPv4(12,26,100,0),22 }, + { IPv4(12,26,128,0),22 }, + { IPv4(12,26,136,0),22 }, + { IPv4(12,26,144,0),22 }, + { IPv4(12,26,144,0),23 }, + { IPv4(12,27,38,0),24 }, + { IPv4(12,27,40,0),24 }, + { IPv4(12,27,41,0),24 }, + { IPv4(12,27,42,0),24 }, + { IPv4(12,27,43,0),24 }, + { IPv4(12,27,66,0),24 }, + { IPv4(12,27,88,0),24 }, + { IPv4(12,27,89,0),24 }, + { IPv4(12,27,90,0),24 }, + { IPv4(12,28,146,0),23 }, + { IPv4(12,28,148,0),24 }, + { IPv4(12,28,242,0),24 }, + { IPv4(12,29,100,0),24 }, + { IPv4(12,29,101,0),24 }, + { IPv4(12,29,102,0),24 }, + { IPv4(12,29,106,0),24 }, + { IPv4(12,29,190,0),24 }, + { IPv4(12,29,194,0),24 }, + { IPv4(12,30,0,0),23 }, + { IPv4(12,30,1,0),24 }, + { IPv4(12,30,105,0),24 }, + { IPv4(12,30,159,0),24 }, + { IPv4(12,30,198,0),23 }, + { IPv4(12,30,205,0),24 }, + { IPv4(12,30,208,0),21 }, + { IPv4(12,30,224,0),22 }, + { IPv4(12,30,228,0),22 }, + { IPv4(12,31,21,0),24 }, + { IPv4(12,31,24,0),24 }, + { IPv4(12,31,25,0),24 }, + { IPv4(12,31,125,0),24 }, + { IPv4(12,31,126,0),24 }, + { IPv4(12,31,143,0),24 }, + { IPv4(12,31,159,0),24 }, + { IPv4(12,31,160,0),24 }, + { IPv4(12,31,161,0),24 }, + { IPv4(12,31,202,0),24 }, + { IPv4(12,32,72,0),23 }, + { IPv4(12,32,90,0),24 }, + { IPv4(12,32,104,0),22 }, + { IPv4(12,32,231,0),24 }, + { IPv4(12,32,241,0),24 }, + { IPv4(12,33,46,0),24 }, + { IPv4(12,33,56,0),22 }, + { IPv4(12,33,114,0),24 }, + { IPv4(12,33,194,0),24 }, + { IPv4(12,33,195,0),24 }, + { IPv4(12,34,2,0),24 }, + { IPv4(12,34,8,0),21 }, + { IPv4(12,34,100,0),24 }, + { IPv4(12,34,101,0),24 }, + { IPv4(12,34,119,0),24 }, + { IPv4(12,34,154,0),24 }, + { IPv4(12,34,155,0),24 }, + { IPv4(12,34,159,0),24 }, + { IPv4(12,34,233,0),24 }, + { IPv4(12,35,37,0),24 }, + { IPv4(12,35,65,0),24 }, + { IPv4(12,35,96,0),24 }, + { IPv4(12,35,114,0),24 }, + { IPv4(12,35,145,0),24 }, + { IPv4(12,35,159,0),24 }, + { IPv4(12,36,56,0),22 }, + { IPv4(12,36,116,0),24 }, + { IPv4(12,36,118,0),24 }, + { 
IPv4(12,36,129,0),24 }, + { IPv4(12,36,133,0),24 }, + { IPv4(12,36,160,0),22 }, + { IPv4(12,36,203,0),24 }, + { IPv4(12,37,27,0),24 }, + { IPv4(12,37,28,0),22 }, + { IPv4(12,37,61,0),24 }, + { IPv4(12,37,113,0),24 }, + { IPv4(12,37,211,0),24 }, + { IPv4(12,37,228,0),22 }, + { IPv4(12,37,232,64),26 }, + { IPv4(12,37,238,0),23 }, + { IPv4(12,38,48,0),22 }, + { IPv4(12,38,64,0),22 }, + { IPv4(12,38,112,0),24 }, + { IPv4(12,38,144,0),24 }, + { IPv4(12,38,145,0),24 }, + { IPv4(12,38,245,0),24 }, + { IPv4(12,38,246,0),24 }, + { IPv4(12,38,247,0),24 }, + { IPv4(12,39,37,0),24 }, + { IPv4(12,39,65,0),24 }, + { IPv4(12,39,106,0),24 }, + { IPv4(12,40,114,0),24 }, + { IPv4(12,40,116,0),24 }, + { IPv4(12,40,121,0),24 }, + { IPv4(12,40,174,0),24 }, + { IPv4(12,40,179,0),24 }, + { IPv4(12,41,3,0),24 }, + { IPv4(12,41,48,0),24 }, + { IPv4(12,41,49,0),24 }, + { IPv4(12,41,50,0),24 }, + { IPv4(12,41,51,0),24 }, + { IPv4(12,41,66,0),23 }, + { IPv4(12,41,122,0),24 }, + { IPv4(12,41,162,0),23 }, + { IPv4(12,41,162,0),24 }, + { IPv4(12,41,163,0),24 }, + { IPv4(12,41,188,0),24 }, + { IPv4(12,41,193,0),24 }, + { IPv4(12,41,194,0),23 }, + { IPv4(12,41,194,0),24 }, + { IPv4(12,41,195,0),24 }, + { IPv4(12,42,26,0),24 }, + { IPv4(12,42,50,0),24 }, + { IPv4(12,42,51,0),24 }, + { IPv4(12,42,52,0),24 }, + { IPv4(12,42,58,0),24 }, + { IPv4(12,42,59,0),24 }, + { IPv4(12,42,130,0),24 }, + { IPv4(12,42,144,0),22 }, + { IPv4(12,42,152,0),24 }, + { IPv4(12,42,240,0),23 }, + { IPv4(12,43,20,0),23 }, + { IPv4(12,43,128,0),24 }, + { IPv4(12,43,128,0),20 }, + { IPv4(12,43,130,0),24 }, + { IPv4(12,43,144,0),20 }, + { IPv4(12,43,146,0),24 }, + { IPv4(12,45,103,0),24 }, + { IPv4(12,45,108,0),24 }, + { IPv4(12,45,121,0),24 }, + { IPv4(12,45,134,0),24 }, + { IPv4(12,46,144,0),21 }, + { IPv4(12,46,160,0),24 }, + { IPv4(12,46,162,0),23 }, + { IPv4(12,46,164,0),22 }, + { IPv4(12,46,168,0),21 }, + { IPv4(12,47,101,0),24 }, + { IPv4(12,47,192,0),21 }, + { IPv4(12,47,217,0),24 }, + { IPv4(12,47,220,0),22 }, + { IPv4(12,47,220,0),24 }, + { IPv4(12,47,221,0),24 }, + { IPv4(12,47,222,0),24 }, + { IPv4(12,64,96,0),19 }, + { IPv4(12,64,128,0),18 }, + { IPv4(12,64,192,0),18 }, + { IPv4(12,65,0,0),18 }, + { IPv4(12,65,64,0),19 }, + { IPv4(12,65,96,0),19 }, + { IPv4(12,65,128,0),18 }, + { IPv4(12,65,192,0),19 }, + { IPv4(12,65,224,0),20 }, + { IPv4(12,65,240,0),20 }, + { IPv4(12,66,0,0),19 }, + { IPv4(12,67,1,0),24 }, + { IPv4(12,67,4,0),24 }, + { IPv4(12,67,5,0),24 }, + { IPv4(12,67,6,0),24 }, + { IPv4(12,67,7,0),24 }, + { IPv4(12,96,40,0),22 }, + { IPv4(12,96,160,0),21 }, + { IPv4(12,96,169,0),24 }, + { IPv4(12,104,36,0),24 }, + { IPv4(12,104,78,0),23 }, + { IPv4(12,104,82,0),23 }, + { IPv4(12,104,96,0),24 }, + { IPv4(12,105,115,0),24 }, + { IPv4(12,105,138,0),24 }, + { IPv4(12,105,139,0),24 }, + { IPv4(12,105,185,0),24 }, + { IPv4(12,106,16,0),24 }, + { IPv4(12,106,96,0),24 }, + { IPv4(12,106,130,0),24 }, + { IPv4(12,107,20,0),22 }, + { IPv4(12,107,40,0),22 }, + { IPv4(12,107,44,0),22 }, + { IPv4(12,107,82,0),24 }, + { IPv4(12,107,130,0),24 }, + { IPv4(12,107,140,0),22 }, + { IPv4(12,107,160,0),22 }, + { IPv4(12,107,180,0),22 }, + { IPv4(12,107,188,0),22 }, + { IPv4(12,107,232,0),24 }, + { IPv4(12,108,132,0),22 }, + { IPv4(12,108,188,0),23 }, + { IPv4(12,108,237,0),24 }, + { IPv4(12,109,19,0),24 }, + { IPv4(12,109,107,0),24 }, + { IPv4(12,109,109,0),24 }, + { IPv4(12,109,224,0),22 }, + { IPv4(12,110,6,0),23 }, + { IPv4(12,110,23,0),24 }, + { IPv4(12,110,40,0),24 }, + { IPv4(12,110,74,0),23 }, + { IPv4(12,110,253,0),24 }, + { 
IPv4(12,111,50,0),24 }, + { IPv4(12,111,132,0),23 }, + { IPv4(12,129,0,0),18 }, + { IPv4(12,129,64,0),18 }, + { IPv4(12,129,192,0),18 }, + { IPv4(12,144,16,0),22 }, + { IPv4(12,144,24,0),22 }, + { IPv4(12,144,144,0),24 }, + { IPv4(12,144,148,0),22 }, + { IPv4(12,145,65,0),24 }, + { IPv4(12,145,188,0),24 }, + { IPv4(12,147,44,0),24 }, + { IPv4(12,147,52,0),24 }, + { IPv4(12,147,53,0),24 }, + { IPv4(12,147,54,0),24 }, + { IPv4(12,147,55,0),24 }, + { IPv4(12,148,204,0),23 }, + { IPv4(12,149,4,0),23 }, + { IPv4(12,150,116,0),24 }, + { IPv4(12,151,64,0),21 }, + { IPv4(12,151,96,0),24 }, + { IPv4(12,152,100,0),23 }, + { IPv4(12,152,102,0),23 }, + { IPv4(12,152,104,0),23 }, + { IPv4(12,152,164,0),23 }, + { IPv4(12,152,236,0),24 }, + { IPv4(12,152,237,0),24 }, + { IPv4(12,152,238,0),24 }, + { IPv4(12,152,239,0),24 }, + { IPv4(12,152,240,0),24 }, + { IPv4(12,153,0,0),21 }, + { IPv4(12,153,8,0),23 }, + { IPv4(12,153,192,0),20 }, + { IPv4(12,153,244,0),22 }, + { IPv4(12,154,224,0),23 }, + { IPv4(12,155,49,0),24 }, + { IPv4(12,155,226,0),24 }, + { IPv4(12,158,136,0),22 }, + { IPv4(12,158,192,0),21 }, + { IPv4(12,158,224,0),23 }, + { IPv4(12,159,64,0),21 }, + { IPv4(12,159,80,0),24 }, + { IPv4(12,161,8,0),21 }, + { IPv4(12,161,222,0),24 }, + { IPv4(12,162,160,0),19 }, + { IPv4(12,242,16,0),24 }, + { IPv4(12,242,17,0),24 }, + { IPv4(12,242,18,0),24 }, + { IPv4(13,181,8,0),21 }, + { IPv4(13,181,20,0),24 }, + { IPv4(13,181,32,0),24 }, + { IPv4(13,181,36,0),24 }, + { IPv4(13,181,40,0),24 }, + { IPv4(13,181,64,0),23 }, + { IPv4(13,181,66,0),24 }, + { IPv4(13,181,68,0),24 }, + { IPv4(13,181,76,0),23 }, + { IPv4(13,181,78,0),24 }, + { IPv4(13,181,80,0),24 }, + { IPv4(13,181,100,0),24 }, + { IPv4(13,181,108,0),24 }, + { IPv4(15,0,0,0),8 }, + { IPv4(15,211,128,0),20 }, + { IPv4(15,220,0,0),16 }, + { IPv4(15,232,0,0),13 }, + { IPv4(15,240,0,0),12 }, + { IPv4(15,248,0,0),20 }, + { IPv4(15,251,128,0),20 }, + { IPv4(15,252,0,0),18 }, + { IPv4(15,252,64,0),20 }, + { IPv4(15,252,240,0),20 }, + { IPv4(16,0,0,0),8 }, + { IPv4(17,0,0,0),8 }, + { IPv4(17,103,0,0),16 }, + { IPv4(17,104,0,0),16 }, + { IPv4(17,112,0,0),16 }, + { IPv4(17,126,0,0),15 }, + { IPv4(24,0,0,0),18 }, + { IPv4(24,0,0,0),12 }, + { IPv4(24,0,64,0),19 }, + { IPv4(24,0,96,0),20 }, + { IPv4(24,0,128,0),18 }, + { IPv4(24,0,192,0),19 }, + { IPv4(24,1,0,0),19 }, + { IPv4(24,1,32,0),19 }, + { IPv4(24,1,128,0),17 }, + { IPv4(24,2,32,0),19 }, + { IPv4(24,2,64,0),19 }, + { IPv4(24,2,96,0),19 }, + { IPv4(24,2,128,0),18 }, + { IPv4(24,2,192,0),19 }, + { IPv4(24,2,224,0),19 }, + { IPv4(24,3,0,0),18 }, + { IPv4(24,3,64,0),18 }, + { IPv4(24,3,128,0),18 }, + { IPv4(24,3,192,0),19 }, + { IPv4(24,4,0,0),18 }, + { IPv4(24,4,64,0),19 }, + { IPv4(24,4,128,0),19 }, + { IPv4(24,4,192,0),19 }, + { IPv4(24,5,32,0),19 }, + { IPv4(24,5,128,0),18 }, + { IPv4(24,6,32,0),19 }, + { IPv4(24,6,64,0),19 }, + { IPv4(24,6,96,0),19 }, + { IPv4(24,7,160,0),19 }, + { IPv4(24,8,160,0),19 }, + { IPv4(24,8,192,0),19 }, + { IPv4(24,9,0,0),22 }, + { IPv4(24,10,96,0),19 }, + { IPv4(24,10,192,0),19 }, + { IPv4(24,11,0,0),19 }, + { IPv4(24,11,32,0),19 }, + { IPv4(24,11,96,0),19 }, + { IPv4(24,11,128,0),18 }, + { IPv4(24,11,224,0),19 }, + { IPv4(24,12,0,0),19 }, + { IPv4(24,12,32,0),19 }, + { IPv4(24,12,64,0),19 }, + { IPv4(24,12,96,0),19 }, + { IPv4(24,12,128,0),19 }, + { IPv4(24,12,160,0),19 }, + { IPv4(24,12,224,0),19 }, + { IPv4(24,13,0,0),19 }, + { IPv4(24,13,32,0),19 }, + { IPv4(24,13,64,0),19 }, + { IPv4(24,13,96,0),19 }, + { IPv4(24,13,128,0),19 }, + { IPv4(24,13,160,0),19 }, + { 
IPv4(24,13,192,0),19 }, + { IPv4(24,14,64,0),19 }, + { IPv4(24,14,96,0),19 }, + { IPv4(24,14,160,0),19 }, + { IPv4(24,16,0,0),18 }, + { IPv4(24,16,0,0),13 }, + { IPv4(24,16,64,0),18 }, + { IPv4(24,16,160,0),19 }, + { IPv4(24,16,192,0),18 }, + { IPv4(24,17,0,0),17 }, + { IPv4(24,17,128,0),18 }, + { IPv4(24,17,192,0),19 }, + { IPv4(24,17,224,0),19 }, + { IPv4(24,18,0,0),18 }, + { IPv4(24,18,64,0),19 }, + { IPv4(24,18,96,0),19 }, + { IPv4(24,18,128,0),18 }, + { IPv4(24,18,192,0),18 }, + { IPv4(24,19,0,0),17 }, + { IPv4(24,19,128,0),19 }, + { IPv4(24,19,160,0),19 }, + { IPv4(24,20,0,0),19 }, + { IPv4(24,20,32,0),19 }, + { IPv4(24,20,128,0),18 }, + { IPv4(24,20,192,0),18 }, + { IPv4(24,21,0,0),17 }, + { IPv4(24,21,128,0),18 }, + { IPv4(24,21,192,0),18 }, + { IPv4(24,22,0,0),16 }, + { IPv4(24,23,0,0),19 }, + { IPv4(24,23,32,0),19 }, + { IPv4(24,23,64,0),18 }, + { IPv4(24,23,128,0),18 }, + { IPv4(24,23,192,0),19 }, + { IPv4(24,23,224,0),19 }, + { IPv4(24,24,0,0),19 }, + { IPv4(24,24,32,0),19 }, + { IPv4(24,24,64,0),19 }, + { IPv4(24,24,96,0),19 }, + { IPv4(24,24,128,0),18 }, + { IPv4(24,24,192,0),20 }, + { IPv4(24,24,208,0),20 }, + { IPv4(24,24,224,0),19 }, + { IPv4(24,25,0,0),19 }, + { IPv4(24,25,32,0),19 }, + { IPv4(24,25,64,0),19 }, + { IPv4(24,25,96,0),19 }, + { IPv4(24,25,128,0),19 }, + { IPv4(24,25,160,0),19 }, + { IPv4(24,25,192,0),19 }, + { IPv4(24,25,224,0),19 }, + { IPv4(24,26,0,0),19 }, + { IPv4(24,26,32,0),19 }, + { IPv4(24,26,64,0),19 }, + { IPv4(24,26,96,0),19 }, + { IPv4(24,26,128,0),19 }, + { IPv4(24,26,160,0),19 }, + { IPv4(24,26,192,0),19 }, + { IPv4(24,26,224,0),19 }, + { IPv4(24,27,0,0),18 }, + { IPv4(24,27,64,0),18 }, + { IPv4(24,27,128,0),19 }, + { IPv4(24,27,160,0),19 }, + { IPv4(24,27,192,0),19 }, + { IPv4(24,27,224,0),20 }, + { IPv4(24,27,240,0),20 }, + { IPv4(24,28,0,0),18 }, + { IPv4(24,28,64,0),19 }, + { IPv4(24,28,96,0),19 }, + { IPv4(24,28,128,0),19 }, + { IPv4(24,28,160,0),19 }, + { IPv4(24,28,192,0),19 }, + { IPv4(24,28,224,0),19 }, + { IPv4(24,29,0,0),19 }, + { IPv4(24,29,32,0),19 }, + { IPv4(24,29,64,0),19 }, + { IPv4(24,29,96,0),19 }, + { IPv4(24,29,128,0),19 }, + { IPv4(24,29,160,0),20 }, + { IPv4(24,29,176,0),20 }, + { IPv4(24,29,192,0),18 }, + { IPv4(24,30,0,0),18 }, + { IPv4(24,30,64,0),19 }, + { IPv4(24,30,96,0),19 }, + { IPv4(24,30,128,0),19 }, + { IPv4(24,30,160,0),19 }, + { IPv4(24,30,192,0),19 }, + { IPv4(24,30,224,0),19 }, + { IPv4(24,31,0,0),19 }, + { IPv4(24,31,32,0),19 }, + { IPv4(24,31,64,0),19 }, + { IPv4(24,31,96,0),19 }, + { IPv4(24,31,128,0),19 }, + { IPv4(24,31,160,0),19 }, + { IPv4(24,31,192,0),19 }, + { IPv4(24,31,224,0),19 }, + { IPv4(24,33,0,0),19 }, + { IPv4(24,34,0,0),16 }, + { IPv4(24,36,0,0),14 }, + { IPv4(24,36,0,0),16 }, + { IPv4(24,37,0,0),17 }, + { IPv4(24,37,128,0),17 }, + { IPv4(24,38,0,0),17 }, + { IPv4(24,38,128,0),18 }, + { IPv4(24,38,192,0),18 }, + { IPv4(24,39,0,0),17 }, + { IPv4(24,39,128,0),17 }, + { IPv4(24,40,0,0),18 }, + { IPv4(24,40,64,0),20 }, + { IPv4(24,41,0,0),18 }, + { IPv4(24,41,64,0),19 }, + { IPv4(24,42,0,0),15 }, + { IPv4(24,56,0,0),18 }, + { IPv4(24,60,0,0),16 }, + { IPv4(24,61,0,0),17 }, + { IPv4(24,61,128,0),19 }, + { IPv4(24,64,0,0),19 }, + { IPv4(24,64,0,0),13 }, + { IPv4(24,64,32,0),19 }, + { IPv4(24,64,64,0),19 }, + { IPv4(24,64,96,0),19 }, + { IPv4(24,64,128,0),19 }, + { IPv4(24,64,192,0),19 }, + { IPv4(24,64,224,0),19 }, + { IPv4(24,65,0,0),19 }, + { IPv4(24,65,32,0),19 }, + { IPv4(24,65,64,0),19 }, + { IPv4(24,65,96,0),19 }, + { IPv4(24,65,128,0),19 }, + { IPv4(24,65,160,0),19 }, + { 
IPv4(24,65,192,0),19 }, + { IPv4(24,65,224,0),19 }, + { IPv4(24,66,0,0),19 }, + { IPv4(24,66,32,0),19 }, + { IPv4(24,66,64,0),19 }, + { IPv4(24,66,96,0),19 }, + { IPv4(24,66,128,0),19 }, + { IPv4(24,66,160,0),19 }, + { IPv4(24,66,192,0),19 }, + { IPv4(24,66,224,0),19 }, + { IPv4(24,67,0,0),19 }, + { IPv4(24,67,32,0),19 }, + { IPv4(24,67,64,0),19 }, + { IPv4(24,67,96,0),19 }, + { IPv4(24,67,128,0),19 }, + { IPv4(24,67,160,0),19 }, + { IPv4(24,67,192,0),19 }, + { IPv4(24,67,224,0),19 }, + { IPv4(24,68,0,0),19 }, + { IPv4(24,68,32,0),19 }, + { IPv4(24,68,96,0),19 }, + { IPv4(24,68,128,0),19 }, + { IPv4(24,68,160,0),19 }, + { IPv4(24,68,192,0),19 }, + { IPv4(24,68,224,0),19 }, + { IPv4(24,69,0,0),19 }, + { IPv4(24,69,32,0),19 }, + { IPv4(24,69,64,0),19 }, + { IPv4(24,69,96,0),19 }, + { IPv4(24,69,192,0),19 }, + { IPv4(24,69,224,0),19 }, + { IPv4(24,70,0,0),19 }, + { IPv4(24,70,32,0),19 }, + { IPv4(24,70,64,0),19 }, + { IPv4(24,70,96,0),19 }, + { IPv4(24,70,128,0),19 }, + { IPv4(24,70,160,0),19 }, + { IPv4(24,70,192,0),19 }, + { IPv4(24,70,224,0),19 }, + { IPv4(24,71,0,0),19 }, + { IPv4(24,71,32,0),19 }, + { IPv4(24,71,64,0),19 }, + { IPv4(24,71,96,0),19 }, + { IPv4(24,71,160,0),19 }, + { IPv4(24,71,192,0),19 }, + { IPv4(24,71,224,0),19 }, + { IPv4(24,76,0,0),14 }, + { IPv4(24,76,0,0),19 }, + { IPv4(24,76,32,0),19 }, + { IPv4(24,76,64,0),19 }, + { IPv4(24,76,96,0),19 }, + { IPv4(24,76,128,0),19 }, + { IPv4(24,76,160,0),19 }, + { IPv4(24,76,192,0),19 }, + { IPv4(24,76,224,0),19 }, + { IPv4(24,77,0,0),19 }, + { IPv4(24,77,32,0),19 }, + { IPv4(24,77,64,0),19 }, + { IPv4(24,77,96,0),19 }, + { IPv4(24,77,128,0),19 }, + { IPv4(24,77,160,0),19 }, + { IPv4(24,77,192,0),19 }, + { IPv4(24,77,224,0),19 }, + { IPv4(24,78,0,0),19 }, + { IPv4(24,78,32,0),19 }, + { IPv4(24,78,64,0),19 }, + { IPv4(24,78,96,0),19 }, + { IPv4(24,78,128,0),19 }, + { IPv4(24,78,160,0),19 }, + { IPv4(24,78,192,0),19 }, + { IPv4(24,78,224,0),19 }, + { IPv4(24,79,0,0),19 }, + { IPv4(24,79,32,0),19 }, + { IPv4(24,79,64,0),19 }, + { IPv4(24,79,96,0),19 }, + { IPv4(24,79,128,0),19 }, + { IPv4(24,79,160,0),19 }, + { IPv4(24,79,192,0),19 }, + { IPv4(24,79,224,0),19 }, + { IPv4(24,80,0,0),19 }, + { IPv4(24,80,0,0),14 }, + { IPv4(24,80,32,0),19 }, + { IPv4(24,80,128,0),19 }, + { IPv4(24,80,160,0),19 }, + { IPv4(24,81,0,0),19 }, + { IPv4(24,81,32,0),19 }, + { IPv4(24,81,64,0),19 }, + { IPv4(24,81,96,0),19 }, + { IPv4(24,81,128,0),19 }, + { IPv4(24,81,160,0),19 }, + { IPv4(24,81,192,0),19 }, + { IPv4(24,81,224,0),19 }, + { IPv4(24,82,0,0),19 }, + { IPv4(24,82,32,0),19 }, + { IPv4(24,82,64,0),19 }, + { IPv4(24,82,96,0),19 }, + { IPv4(24,82,128,0),19 }, + { IPv4(24,82,160,0),19 }, + { IPv4(24,82,192,0),19 }, + { IPv4(24,82,224,0),19 }, + { IPv4(24,83,0,0),19 }, + { IPv4(24,83,32,0),19 }, + { IPv4(24,83,64,0),19 }, + { IPv4(24,83,96,0),19 }, + { IPv4(24,83,128,0),19 }, + { IPv4(24,88,0,0),18 }, + { IPv4(24,88,64,0),19 }, + { IPv4(24,88,96,0),19 }, + { IPv4(24,88,128,0),19 }, + { IPv4(24,88,160,0),19 }, + { IPv4(24,88,192,0),19 }, + { IPv4(24,88,224,0),20 }, + { IPv4(24,88,240,0),20 }, + { IPv4(24,90,0,0),19 }, + { IPv4(24,91,0,0),16 }, + { IPv4(24,91,64,0),19 }, + { IPv4(24,91,96,0),19 }, + { IPv4(24,91,128,0),17 }, + { IPv4(24,92,0,0),19 }, + { IPv4(24,92,32,0),19 }, + { IPv4(24,92,64,0),19 }, + { IPv4(24,92,96,0),19 }, + { IPv4(24,92,128,0),20 }, + { IPv4(24,92,144,0),20 }, + { IPv4(24,92,160,0),19 }, + { IPv4(24,92,192,0),19 }, + { IPv4(24,92,224,0),19 }, + { IPv4(24,93,0,0),19 }, + { IPv4(24,93,32,0),19 }, + { IPv4(24,93,64,0),19 }, + { 
IPv4(24,93,96,0),19 }, + { IPv4(24,93,128,0),19 }, + { IPv4(24,93,160,0),19 }, + { IPv4(24,93,192,0),19 }, + { IPv4(24,93,224,0),19 }, + { IPv4(24,94,0,0),19 }, + { IPv4(24,94,32,0),19 }, + { IPv4(24,94,64,0),24 }, + { IPv4(24,94,64,0),19 }, + { IPv4(24,94,96,0),19 }, + { IPv4(24,94,128,0),19 }, + { IPv4(24,94,160,0),19 }, + { IPv4(24,94,192,0),19 }, + { IPv4(24,94,224,0),19 }, + { IPv4(24,95,0,0),19 }, + { IPv4(24,95,32,0),19 }, + { IPv4(24,95,64,0),19 }, + { IPv4(24,95,96,0),19 }, + { IPv4(24,95,128,0),19 }, + { IPv4(24,95,160,0),19 }, + { IPv4(24,95,192,0),19 }, + { IPv4(24,95,224,0),19 }, + { IPv4(24,98,0,0),17 }, + { IPv4(24,98,128,0),18 }, + { IPv4(24,98,192,0),20 }, + { IPv4(24,100,0,0),15 }, + { IPv4(24,102,0,0),17 }, + { IPv4(24,102,128,0),18 }, + { IPv4(24,102,192,0),19 }, + { IPv4(24,102,224,0),19 }, + { IPv4(24,103,0,0),17 }, + { IPv4(24,103,128,0),19 }, + { IPv4(24,103,160,0),20 }, + { IPv4(24,104,0,0),21 }, + { IPv4(24,104,0,0),18 }, + { IPv4(24,104,8,0),21 }, + { IPv4(24,104,40,0),21 }, + { IPv4(24,104,48,0),21 }, + { IPv4(24,104,64,0),19 }, + { IPv4(24,104,72,0),21 }, + { IPv4(24,108,0,0),18 }, + { IPv4(24,108,64,0),19 }, + { IPv4(24,108,96,0),19 }, + { IPv4(24,108,128,0),19 }, + { IPv4(24,108,160,0),19 }, + { IPv4(24,108,192,0),19 }, + { IPv4(24,108,224,0),19 }, + { IPv4(24,109,0,0),19 }, + { IPv4(24,109,32,0),20 }, + { IPv4(24,109,32,0),21 }, + { IPv4(24,109,48,0),20 }, + { IPv4(24,109,64,0),19 }, + { IPv4(24,109,96,0),20 }, + { IPv4(24,112,0,0),16 }, + { IPv4(24,112,112,0),20 }, + { IPv4(24,114,0,0),16 }, + { IPv4(24,116,0,0),23 }, + { IPv4(24,116,0,0),24 }, + { IPv4(24,116,0,0),17 }, + { IPv4(24,116,2,0),24 }, + { IPv4(24,116,3,0),24 }, + { IPv4(24,116,4,0),24 }, + { IPv4(24,116,5,0),24 }, + { IPv4(24,116,6,0),24 }, + { IPv4(24,116,7,0),24 }, + { IPv4(24,116,8,0),24 }, + { IPv4(24,116,10,0),24 }, + { IPv4(24,116,11,0),24 }, + { IPv4(24,116,12,0),23 }, + { IPv4(24,116,14,0),24 }, + { IPv4(24,116,15,0),24 }, + { IPv4(24,116,16,0),23 }, + { IPv4(24,116,18,0),23 }, + { IPv4(24,116,18,0),24 }, + { IPv4(24,116,25,0),24 }, + { IPv4(24,116,26,0),23 }, + { IPv4(24,116,28,0),22 }, + { IPv4(24,116,32,0),23 }, + { IPv4(24,116,36,0),24 }, + { IPv4(24,116,37,0),24 }, + { IPv4(24,116,38,0),24 }, + { IPv4(24,116,39,0),24 }, + { IPv4(24,116,56,0),21 }, + { IPv4(24,116,64,0),21 }, + { IPv4(24,116,72,0),23 }, + { IPv4(24,116,74,0),23 }, + { IPv4(24,116,76,0),22 }, + { IPv4(24,116,80,0),23 }, + { IPv4(24,116,82,0),23 }, + { IPv4(24,116,84,0),22 }, + { IPv4(24,116,88,0),22 }, + { IPv4(24,116,92,0),22 }, + { IPv4(24,116,96,0),22 }, + { IPv4(24,116,100,0),23 }, + { IPv4(24,116,102,0),23 }, + { IPv4(24,116,104,0),23 }, + { IPv4(24,116,106,0),24 }, + { IPv4(24,116,107,0),24 }, + { IPv4(24,116,108,0),23 }, + { IPv4(24,116,110,0),23 }, + { IPv4(24,116,112,0),23 }, + { IPv4(24,116,114,0),23 }, + { IPv4(24,116,116,0),22 }, + { IPv4(24,116,116,0),23 }, + { IPv4(24,116,118,0),24 }, + { IPv4(24,116,120,0),22 }, + { IPv4(24,116,124,0),23 }, + { IPv4(24,116,126,0),23 }, + { IPv4(24,116,128,0),19 }, + { IPv4(24,116,128,0),20 }, + { IPv4(24,116,144,0),21 }, + { IPv4(24,116,152,0),21 }, + { IPv4(24,116,160,0),19 }, + { IPv4(24,116,162,0),23 }, + { IPv4(24,116,164,0),22 }, + { IPv4(24,116,170,0),23 }, + { IPv4(24,116,172,0),22 }, + { IPv4(24,116,176,0),24 }, + { IPv4(24,116,178,0),24 }, + { IPv4(24,116,180,0),24 }, + { IPv4(24,118,0,0),17 }, + { IPv4(24,118,128,0),20 }, + { IPv4(24,120,0,0),19 }, + { IPv4(24,120,32,0),19 }, + { IPv4(24,120,64,0),19 }, + { IPv4(24,120,96,0),19 }, + { IPv4(24,120,128,0),19 }, 
+ { IPv4(24,120,160,0),19 }, + { IPv4(24,121,31,0),24 }, + { IPv4(24,125,0,0),18 }, + { IPv4(24,125,64,0),20 }, + { IPv4(24,126,0,0),16 }, + { IPv4(24,127,0,0),17 }, + { IPv4(24,127,128,0),18 }, + { IPv4(24,128,0,0),16 }, + { IPv4(24,128,6,0),24 }, + { IPv4(24,128,191,0),24 }, + { IPv4(24,129,0,0),17 }, + { IPv4(24,129,2,0),24 }, + { IPv4(24,129,4,0),24 }, + { IPv4(24,129,24,0),23 }, + { IPv4(24,129,26,0),24 }, + { IPv4(24,129,28,0),24 }, + { IPv4(24,129,32,0),24 }, + { IPv4(24,129,42,0),24 }, + { IPv4(24,129,128,0),18 }, + { IPv4(24,129,192,0),19 }, + { IPv4(24,130,0,0),18 }, + { IPv4(24,130,64,0),19 }, + { IPv4(24,130,96,0),19 }, + { IPv4(24,130,192,0),19 }, + { IPv4(24,131,0,0),18 }, + { IPv4(24,131,64,0),18 }, + { IPv4(24,131,128,0),18 }, + { IPv4(24,131,192,0),18 }, + { IPv4(24,136,0,0),19 }, + { IPv4(24,136,32,0),19 }, + { IPv4(24,136,64,0),20 }, + { IPv4(24,136,64,0),22 }, + { IPv4(24,136,68,0),23 }, + { IPv4(24,136,70,0),24 }, + { IPv4(24,136,128,0),21 }, + { IPv4(24,136,136,0),22 }, + { IPv4(24,136,140,0),22 }, + { IPv4(24,136,144,0),22 }, + { IPv4(24,136,150,0),23 }, + { IPv4(24,136,152,0),21 }, + { IPv4(24,136,160,0),21 }, + { IPv4(24,137,0,0),19 }, + { IPv4(24,139,0,0),19 }, + { IPv4(24,140,0,0),19 }, + { IPv4(24,140,32,0),19 }, + { IPv4(24,141,0,0),20 }, + { IPv4(24,141,0,0),16 }, + { IPv4(24,141,16,0),20 }, + { IPv4(24,141,32,0),20 }, + { IPv4(24,141,48,0),20 }, + { IPv4(24,141,80,0),20 }, + { IPv4(24,141,96,0),20 }, + { IPv4(24,141,112,0),20 }, + { IPv4(24,141,128,0),20 }, + { IPv4(24,141,144,0),20 }, + { IPv4(24,141,160,0),20 }, + { IPv4(24,141,192,0),20 }, + { IPv4(24,141,224,0),20 }, + { IPv4(24,141,240,0),20 }, + { IPv4(24,142,32,0),19 }, + { IPv4(24,142,40,0),22 }, + { IPv4(24,142,44,0),22 }, + { IPv4(24,142,76,0),23 }, + { IPv4(24,142,88,0),23 }, + { IPv4(24,142,92,0),22 }, + { IPv4(24,142,96,0),22 }, + { IPv4(24,142,100,0),23 }, + { IPv4(24,142,160,0),19 }, + { IPv4(24,142,178,0),24 }, + { IPv4(24,142,192,0),18 }, + { IPv4(24,142,205,0),24 }, + { IPv4(24,145,128,0),19 }, + { IPv4(24,145,128,0),21 }, + { IPv4(24,145,136,0),22 }, + { IPv4(24,145,140,0),22 }, + { IPv4(24,145,144,0),21 }, + { IPv4(24,145,152,0),22 }, + { IPv4(24,145,156,0),23 }, + { IPv4(24,145,158,0),23 }, + { IPv4(24,145,160,0),20 }, + { IPv4(24,145,168,0),21 }, + { IPv4(24,147,0,0),16 }, + { IPv4(24,148,0,0),18 }, + { IPv4(24,148,64,0),19 }, + { IPv4(24,150,0,0),20 }, + { IPv4(24,150,0,0),16 }, + { IPv4(24,150,16,0),20 }, + { IPv4(24,150,48,0),20 }, + { IPv4(24,150,64,0),20 }, + { IPv4(24,150,80,0),20 }, + { IPv4(24,150,96,0),20 }, + { IPv4(24,150,112,0),20 }, + { IPv4(24,150,128,0),20 }, + { IPv4(24,150,144,0),20 }, + { IPv4(24,150,160,0),20 }, + { IPv4(24,150,176,0),20 }, + { IPv4(24,150,224,0),20 }, + { IPv4(24,150,240,0),20 }, + { IPv4(24,151,0,0),19 }, + { IPv4(24,151,32,0),20 }, + { IPv4(24,151,32,0),19 }, + { IPv4(24,151,40,0),22 }, + { IPv4(24,151,44,0),23 }, + { IPv4(24,151,46,0),23 }, + { IPv4(24,151,48,0),24 }, + { IPv4(24,151,48,0),20 }, + { IPv4(24,151,49,0),24 }, + { IPv4(24,151,50,0),23 }, + { IPv4(24,151,52,0),22 }, + { IPv4(24,151,60,0),23 }, + { IPv4(24,151,62,0),24 }, + { IPv4(24,151,63,0),24 }, + { IPv4(24,151,64,0),20 }, + { IPv4(24,151,80,0),21 }, + { IPv4(24,151,88,0),22 }, + { IPv4(24,151,92,0),22 }, + { IPv4(24,153,0,0),18 }, + { IPv4(24,155,0,0),19 }, + { IPv4(24,155,9,0),24 }, + { IPv4(24,155,10,0),24 }, + { IPv4(24,156,0,0),19 }, + { IPv4(24,156,0,0),16 }, + { IPv4(24,157,0,0),19 }, + { IPv4(24,157,0,0),18 }, + { IPv4(24,157,0,0),17 }, + { IPv4(24,157,64,0),19 }, + { 
IPv4(24,157,128,0),18 }, + { IPv4(24,157,192,0),19 }, + { IPv4(24,157,224,0),19 }, + { IPv4(24,158,32,0),19 }, + { IPv4(24,158,64,0),20 }, + { IPv4(24,158,80,0),20 }, + { IPv4(24,158,96,0),19 }, + { IPv4(24,158,128,0),20 }, + { IPv4(24,158,192,0),20 }, + { IPv4(24,158,240,0),20 }, + { IPv4(24,159,0,0),20 }, + { IPv4(24,159,32,0),20 }, + { IPv4(24,159,48,0),20 }, + { IPv4(24,159,64,0),21 }, + { IPv4(24,159,64,0),20 }, + { IPv4(24,159,72,0),21 }, + { IPv4(24,159,80,0),20 }, + { IPv4(24,159,96,0),20 }, + { IPv4(24,159,112,0),20 }, + { IPv4(24,159,128,0),20 }, + { IPv4(24,159,164,0),22 }, + { IPv4(24,159,168,0),23 }, + { IPv4(24,159,170,0),23 }, + { IPv4(24,159,172,0),23 }, + { IPv4(24,159,175,0),24 }, + { IPv4(24,159,176,0),20 }, + { IPv4(24,159,208,0),20 }, + { IPv4(24,160,0,0),19 }, + { IPv4(24,160,32,0),20 }, + { IPv4(24,160,48,0),20 }, + { IPv4(24,160,64,0),18 }, + { IPv4(24,160,128,0),19 }, + { IPv4(24,160,160,0),19 }, + { IPv4(24,160,192,0),19 }, + { IPv4(24,160,224,0),19 }, + { IPv4(24,161,0,0),18 }, + { IPv4(24,161,64,0),19 }, + { IPv4(24,161,96,0),19 }, + { IPv4(24,161,128,0),19 }, + { IPv4(24,161,160,0),19 }, + { IPv4(24,161,192,0),19 }, + { IPv4(24,161,224,0),19 }, + { IPv4(24,162,0,0),18 }, + { IPv4(24,162,64,0),19 }, + { IPv4(24,162,96,0),19 }, + { IPv4(24,162,128,0),19 }, + { IPv4(24,162,160,0),19 }, + { IPv4(24,162,192,0),19 }, + { IPv4(24,162,224,0),19 }, + { IPv4(24,163,0,0),19 }, + { IPv4(24,163,32,0),20 }, + { IPv4(24,163,48,0),20 }, + { IPv4(24,163,64,0),19 }, + { IPv4(24,163,96,0),19 }, + { IPv4(24,163,128,0),20 }, + { IPv4(24,163,144,0),20 }, + { IPv4(24,163,160,0),19 }, + { IPv4(24,163,192,0),19 }, + { IPv4(24,163,224,0),20 }, + { IPv4(24,163,240,0),20 }, + { IPv4(24,164,0,0),19 }, + { IPv4(24,164,0,0),18 }, + { IPv4(24,164,32,0),19 }, + { IPv4(24,164,64,0),19 }, + { IPv4(24,164,96,0),19 }, + { IPv4(24,164,128,0),19 }, + { IPv4(24,164,160,0),20 }, + { IPv4(24,164,176,0),20 }, + { IPv4(24,164,192,0),19 }, + { IPv4(24,164,224,0),19 }, + { IPv4(24,165,0,0),19 }, + { IPv4(24,165,32,0),19 }, + { IPv4(24,165,64,0),20 }, + { IPv4(24,165,80,0),20 }, + { IPv4(24,165,96,0),20 }, + { IPv4(24,165,112,0),20 }, + { IPv4(24,165,128,0),18 }, + { IPv4(24,165,192,0),19 }, + { IPv4(24,165,224,0),19 }, + { IPv4(24,166,0,0),19 }, + { IPv4(24,166,0,0),18 }, + { IPv4(24,166,32,0),19 }, + { IPv4(24,166,64,0),18 }, + { IPv4(24,166,128,0),19 }, + { IPv4(24,166,160,0),19 }, + { IPv4(24,166,192,0),19 }, + { IPv4(24,166,224,0),20 }, + { IPv4(24,166,240,0),20 }, + { IPv4(24,167,0,0),18 }, + { IPv4(24,167,64,0),19 }, + { IPv4(24,167,96,0),19 }, + { IPv4(24,167,128,0),19 }, + { IPv4(24,167,160,0),19 }, + { IPv4(24,167,192,0),19 }, + { IPv4(24,167,224,0),19 }, + { IPv4(24,168,0,0),18 }, + { IPv4(24,168,64,0),19 }, + { IPv4(24,168,96,0),19 }, + { IPv4(24,168,128,0),19 }, + { IPv4(24,168,192,0),19 }, + { IPv4(24,168,224,0),19 }, + { IPv4(24,169,0,0),19 }, + { IPv4(24,169,32,0),19 }, + { IPv4(24,169,64,0),19 }, + { IPv4(24,169,96,0),19 }, + { IPv4(24,169,128,0),19 }, + { IPv4(24,169,160,0),20 }, + { IPv4(24,169,176,0),20 }, + { IPv4(24,169,192,0),20 }, + { IPv4(24,169,208,0),20 }, + { IPv4(24,169,224,0),20 }, + { IPv4(24,169,240,0),20 }, + { IPv4(24,170,0,0),19 }, + { IPv4(24,170,32,0),19 }, + { IPv4(24,170,64,0),19 }, + { IPv4(24,170,96,0),19 }, + { IPv4(24,170,128,0),20 }, + { IPv4(24,170,144,0),21 }, + { IPv4(24,170,152,0),22 }, + { IPv4(24,170,156,0),23 }, + { IPv4(24,170,158,0),23 }, + { IPv4(24,176,0,0),17 }, + { IPv4(24,176,0,0),13 }, + { IPv4(24,176,0,0),14 }, + { IPv4(24,176,128,0),17 }, + { 
IPv4(24,177,0,0),17 }, + { IPv4(24,177,128,0),17 }, + { IPv4(24,178,0,0),16 }, + { IPv4(24,179,0,0),17 }, + { IPv4(24,179,128,0),17 }, + { IPv4(24,180,0,0),15 }, + { IPv4(24,180,0,0),17 }, + { IPv4(24,180,128,0),17 }, + { IPv4(24,181,0,0),17 }, + { IPv4(24,181,128,0),17 }, + { IPv4(24,182,0,0),16 }, + { IPv4(24,183,0,0),16 }, + { IPv4(24,196,16,0),20 }, + { IPv4(24,196,32,0),20 }, + { IPv4(24,196,48,0),20 }, + { IPv4(24,196,160,0),20 }, + { IPv4(24,196,176,0),20 }, + { IPv4(24,196,200,0),21 }, + { IPv4(24,196,224,0),20 }, + { IPv4(24,196,241,0),24 }, + { IPv4(24,196,244,0),22 }, + { IPv4(24,196,252,0),22 }, + { IPv4(24,197,0,0),24 }, + { IPv4(24,197,2,0),24 }, + { IPv4(24,197,4,0),22 }, + { IPv4(24,197,8,0),22 }, + { IPv4(24,197,12,0),22 }, + { IPv4(24,197,16,0),22 }, + { IPv4(24,197,32,0),19 }, + { IPv4(24,197,64,0),19 }, + { IPv4(24,197,96,0),21 }, + { IPv4(24,197,104,0),23 }, + { IPv4(24,197,112,0),20 }, + { IPv4(24,197,128,0),20 }, + { IPv4(24,198,0,0),18 }, + { IPv4(24,198,64,0),19 }, + { IPv4(24,198,96,0),20 }, + { IPv4(24,204,0,0),17 }, + { IPv4(24,206,0,0),20 }, + { IPv4(24,206,64,0),19 }, + { IPv4(24,206,160,0),20 }, + { IPv4(24,207,0,0),18 }, + { IPv4(24,207,128,0),18 }, + { IPv4(24,208,0,0),18 }, + { IPv4(24,213,0,0),21 }, + { IPv4(24,213,8,0),22 }, + { IPv4(24,213,12,0),22 }, + { IPv4(24,213,20,0),22 }, + { IPv4(24,213,24,0),22 }, + { IPv4(24,213,28,0),22 }, + { IPv4(24,213,32,0),19 }, + { IPv4(24,213,60,0),24 }, + { IPv4(24,214,0,0),18 }, + { IPv4(24,214,1,0),24 }, + { IPv4(24,214,3,0),24 }, + { IPv4(24,214,4,0),24 }, + { IPv4(24,214,5,0),24 }, + { IPv4(24,214,6,0),24 }, + { IPv4(24,214,7,0),24 }, + { IPv4(24,214,8,0),24 }, + { IPv4(24,214,9,0),24 }, + { IPv4(24,214,10,0),24 }, + { IPv4(24,214,11,0),24 }, + { IPv4(24,214,12,0),24 }, + { IPv4(24,214,13,0),24 }, + { IPv4(24,214,14,0),24 }, + { IPv4(24,214,15,0),24 }, + { IPv4(24,214,16,0),24 }, + { IPv4(24,214,17,0),24 }, + { IPv4(24,214,18,0),24 }, + { IPv4(24,214,19,0),24 }, + { IPv4(24,214,20,0),24 }, + { IPv4(24,214,21,0),24 }, + { IPv4(24,214,22,0),24 }, + { IPv4(24,214,23,0),24 }, + { IPv4(24,214,24,0),24 }, + { IPv4(24,214,25,0),24 }, + { IPv4(24,214,26,0),24 }, + { IPv4(24,214,27,0),24 }, + { IPv4(24,214,28,0),24 }, + { IPv4(24,214,29,0),24 }, + { IPv4(24,214,30,0),24 }, + { IPv4(24,214,31,0),24 }, + { IPv4(24,214,32,0),24 }, + { IPv4(24,214,33,0),24 }, + { IPv4(24,214,34,0),24 }, + { IPv4(24,214,35,0),24 }, + { IPv4(24,214,36,0),24 }, + { IPv4(24,214,37,0),24 }, + { IPv4(24,214,38,0),24 }, + { IPv4(24,214,39,0),24 }, + { IPv4(24,214,40,0),24 }, + { IPv4(24,214,41,0),24 }, + { IPv4(24,214,42,0),24 }, + { IPv4(24,214,43,0),24 }, + { IPv4(24,214,44,0),24 }, + { IPv4(24,214,45,0),24 }, + { IPv4(24,214,46,0),24 }, + { IPv4(24,214,47,0),24 }, + { IPv4(24,214,48,0),24 }, + { IPv4(24,214,49,0),24 }, + { IPv4(24,214,50,0),24 }, + { IPv4(24,214,51,0),24 }, + { IPv4(24,214,52,0),24 }, + { IPv4(24,214,53,0),24 }, + { IPv4(24,214,54,0),24 }, + { IPv4(24,214,55,0),24 }, + { IPv4(24,214,56,0),24 }, + { IPv4(24,214,57,0),24 }, + { IPv4(24,214,58,0),24 }, + { IPv4(24,214,59,0),24 }, + { IPv4(24,214,60,0),24 }, + { IPv4(24,214,61,0),24 }, + { IPv4(24,214,62,0),24 }, + { IPv4(24,214,63,0),24 }, + { IPv4(24,214,64,0),24 }, + { IPv4(24,214,65,0),24 }, + { IPv4(24,214,66,0),24 }, + { IPv4(24,214,67,0),24 }, + { IPv4(24,214,68,0),24 }, + { IPv4(24,214,69,0),24 }, + { IPv4(24,214,70,0),24 }, + { IPv4(24,214,71,0),24 }, + { IPv4(24,214,74,0),24 }, + { IPv4(24,214,75,0),24 }, + { IPv4(24,214,78,0),24 }, + { IPv4(24,214,80,0),24 }, + { 
IPv4(24,214,81,0),24 }, + { IPv4(24,214,82,0),24 }, + { IPv4(24,214,86,0),24 }, + { IPv4(24,214,87,0),24 }, + { IPv4(24,214,90,0),24 }, + { IPv4(24,214,91,0),24 }, + { IPv4(24,214,93,0),24 }, + { IPv4(24,214,94,0),24 }, + { IPv4(24,214,96,0),24 }, + { IPv4(24,214,97,0),24 }, + { IPv4(24,214,98,0),24 }, + { IPv4(24,214,99,0),24 }, + { IPv4(24,214,102,0),24 }, + { IPv4(24,214,103,0),24 }, + { IPv4(24,214,105,0),24 }, + { IPv4(24,214,108,0),24 }, + { IPv4(24,214,109,0),24 }, + { IPv4(24,214,113,0),24 }, + { IPv4(24,214,114,0),24 }, + { IPv4(24,214,115,0),24 }, + { IPv4(24,214,120,0),24 }, + { IPv4(24,214,121,0),24 }, + { IPv4(24,214,122,0),24 }, + { IPv4(24,214,126,0),24 }, + { IPv4(24,214,128,0),19 }, + { IPv4(24,214,133,0),24 }, + { IPv4(24,214,134,0),24 }, + { IPv4(24,215,0,0),19 }, + { IPv4(24,215,16,0),20 }, + { IPv4(24,215,24,0),21 }, + { IPv4(24,215,32,0),20 }, + { IPv4(24,216,10,0),24 }, + { IPv4(24,216,141,0),24 }, + { IPv4(24,216,184,0),24 }, + { IPv4(24,216,241,0),24 }, + { IPv4(24,216,252,0),24 }, + { IPv4(24,216,253,0),24 }, + { IPv4(24,216,254,0),24 }, + { IPv4(24,216,255,0),24 }, + { IPv4(24,217,0,0),16 }, + { IPv4(24,218,0,0),16 }, + { IPv4(24,218,188,0),22 }, + { IPv4(24,221,208,0),20 }, + { IPv4(24,222,112,0),20 }, + { IPv4(24,223,0,0),18 }, + { IPv4(24,223,64,0),20 }, + { IPv4(24,226,0,0),17 }, + { IPv4(24,226,32,0),20 }, + { IPv4(24,226,48,0),20 }, + { IPv4(24,226,64,0),20 }, + { IPv4(24,226,80,0),20 }, + { IPv4(24,226,96,0),20 }, + { IPv4(24,226,112,0),20 }, + { IPv4(24,227,0,0),19 }, + { IPv4(24,228,0,0),18 }, + { IPv4(24,228,64,0),19 }, + { IPv4(24,229,0,0),17 }, + { IPv4(24,229,128,0),19 }, + { IPv4(24,229,160,0),20 }, + { IPv4(24,234,0,0),19 }, + { IPv4(24,234,32,0),19 }, + { IPv4(24,234,64,0),19 }, + { IPv4(24,234,96,0),19 }, + { IPv4(24,234,128,0),19 }, + { IPv4(24,234,160,0),19 }, + { IPv4(24,234,192,0),19 }, + { IPv4(24,234,224,0),19 }, + { IPv4(24,236,0,0),19 }, + { IPv4(24,240,12,0),24 }, + { IPv4(24,240,26,0),24 }, + { IPv4(24,240,97,0),24 }, + { IPv4(24,240,100,0),24 }, + { IPv4(24,240,119,0),24 }, + { IPv4(24,240,122,0),24 }, + { IPv4(24,240,144,0),24 }, + { IPv4(24,240,145,0),24 }, + { IPv4(24,240,146,0),24 }, + { IPv4(24,240,147,0),24 }, + { IPv4(24,240,148,0),24 }, + { IPv4(24,240,149,0),24 }, + { IPv4(24,240,180,0),24 }, + { IPv4(24,240,186,0),24 }, + { IPv4(24,240,194,0),24 }, + { IPv4(24,240,199,0),24 }, + { IPv4(24,240,207,0),24 }, + { IPv4(24,240,213,0),24 }, + { IPv4(24,240,229,0),24 }, + { IPv4(24,240,230,0),24 }, + { IPv4(24,240,232,0),24 }, + { IPv4(24,240,233,0),24 }, + { IPv4(24,240,234,0),24 }, + { IPv4(24,240,235,0),24 }, + { IPv4(24,240,236,0),24 }, + { IPv4(24,240,242,0),24 }, + { IPv4(24,240,243,0),24 }, + { IPv4(24,240,244,0),24 }, + { IPv4(24,241,54,0),24 }, + { IPv4(24,241,71,0),24 }, + { IPv4(24,241,88,0),24 }, + { IPv4(24,241,105,0),24 }, + { IPv4(24,241,111,0),24 }, + { IPv4(24,241,120,0),24 }, + { IPv4(24,241,128,0),24 }, + { IPv4(24,241,135,0),24 }, + { IPv4(24,241,154,0),24 }, + { IPv4(24,241,167,0),24 }, + { IPv4(24,241,185,0),24 }, + { IPv4(24,242,0,0),19 }, + { IPv4(24,242,32,0),19 }, + { IPv4(24,242,64,0),19 }, + { IPv4(24,242,96,0),19 }, + { IPv4(24,242,128,0),20 }, + { IPv4(24,242,144,0),20 }, + { IPv4(24,242,160,0),20 }, + { IPv4(24,242,176,0),20 }, + { IPv4(24,244,0,0),20 }, + { IPv4(24,244,16,0),20 }, + { IPv4(24,245,0,0),18 }, + { IPv4(24,245,64,0),20 }, + { IPv4(24,246,0,0),17 }, + { IPv4(24,246,9,0),24 }, + { IPv4(24,246,10,0),23 }, + { IPv4(24,246,12,0),22 }, + { IPv4(24,246,16,0),23 }, + { IPv4(24,246,38,0),24 }, + { 
IPv4(24,246,60,0),24 }, + { IPv4(24,246,122,0),24 }, + { IPv4(24,246,128,0),18 }, + { IPv4(24,247,0,0),20 }, + { IPv4(24,247,16,0),21 }, + { IPv4(24,247,32,0),20 }, + { IPv4(24,247,48,0),20 }, + { IPv4(24,247,48,0),21 }, + { IPv4(24,247,64,0),20 }, + { IPv4(24,247,96,0),20 }, + { IPv4(24,247,112,0),20 }, + { IPv4(24,247,128,0),20 }, + { IPv4(24,247,144,0),20 }, + { IPv4(24,247,152,0),22 }, + { IPv4(24,247,176,0),20 }, + { IPv4(24,248,0,0),17 }, + { IPv4(24,248,0,0),13 }, + { IPv4(24,248,128,0),17 }, + { IPv4(24,249,0,0),17 }, + { IPv4(24,249,128,0),17 }, + { IPv4(24,250,0,0),18 }, + { IPv4(24,250,64,0),18 }, + { IPv4(24,250,128,0),18 }, + { IPv4(24,250,192,0),19 }, + { IPv4(24,250,224,0),19 }, + { IPv4(24,251,0,0),17 }, + { IPv4(24,251,128,0),18 }, + { IPv4(24,251,192,0),18 }, + { IPv4(24,252,0,0),17 }, + { IPv4(24,252,128,0),17 }, + { IPv4(24,253,0,0),17 }, + { IPv4(24,253,128,0),17 }, + { IPv4(24,254,0,0),17 }, + { IPv4(24,254,128,0),17 }, + { IPv4(24,255,0,0),17 }, + { IPv4(24,255,128,0),17 }, + { IPv4(32,0,0,0),8 }, + { IPv4(32,96,0,0),13 }, + { IPv4(32,96,43,0),24 }, + { IPv4(32,96,48,0),24 }, + { IPv4(32,96,62,0),24 }, + { IPv4(32,96,83,0),24 }, + { IPv4(32,96,86,0),24 }, + { IPv4(32,96,111,0),24 }, + { IPv4(32,96,224,0),19 }, + { IPv4(32,97,17,0),24 }, + { IPv4(32,97,80,0),21 }, + { IPv4(32,97,87,0),24 }, + { IPv4(32,97,91,0),24 }, + { IPv4(32,97,100,0),24 }, + { IPv4(32,97,104,0),24 }, + { IPv4(32,97,110,0),24 }, + { IPv4(32,97,132,0),24 }, + { IPv4(32,97,135,0),24 }, + { IPv4(32,97,136,0),24 }, + { IPv4(32,97,152,0),24 }, + { IPv4(32,97,155,0),24 }, + { IPv4(32,97,159,0),24 }, + { IPv4(32,97,167,0),24 }, + { IPv4(32,97,168,0),23 }, + { IPv4(32,97,170,0),24 }, + { IPv4(32,97,182,0),24 }, + { IPv4(32,97,183,0),24 }, + { IPv4(32,97,185,0),24 }, + { IPv4(32,97,198,0),24 }, + { IPv4(32,97,212,0),24 }, + { IPv4(32,97,217,0),24 }, + { IPv4(32,97,219,0),24 }, + { IPv4(32,97,225,0),24 }, + { IPv4(32,97,240,0),23 }, + { IPv4(32,97,242,0),24 }, + { IPv4(32,97,252,0),22 }, + { IPv4(32,102,134,0),23 }, + { IPv4(32,102,136,0),22 }, + { IPv4(32,102,140,0),23 }, + { IPv4(32,102,197,0),24 }, + { IPv4(32,102,198,0),24 }, + { IPv4(32,102,199,0),24 }, + { IPv4(32,102,200,0),24 }, + { IPv4(32,102,201,0),24 }, + { IPv4(32,102,202,0),24 }, + { IPv4(32,102,203,0),24 }, + { IPv4(32,102,204,0),24 }, + { IPv4(32,102,205,0),24 }, + { IPv4(32,102,206,0),24 }, + { IPv4(32,102,207,0),24 }, + { IPv4(32,102,208,0),24 }, + { IPv4(32,102,233,0),24 }, + { IPv4(32,102,234,0),24 }, + { IPv4(32,102,235,0),24 }, + { IPv4(32,102,236,0),24 }, + { IPv4(32,102,237,0),24 }, + { IPv4(32,102,238,0),24 }, + { IPv4(32,102,239,0),24 }, + { IPv4(32,102,240,0),24 }, + { IPv4(32,102,241,0),24 }, + { IPv4(32,102,242,0),24 }, + { IPv4(32,102,243,0),24 }, + { IPv4(32,102,244,0),24 }, + { IPv4(32,104,0,0),15 }, + { IPv4(32,107,14,0),24 }, + { IPv4(32,107,31,0),24 }, + { IPv4(32,224,112,0),24 }, + { IPv4(32,224,249,0),24 }, + { IPv4(32,227,135,0),24 }, + { IPv4(32,227,215,0),24 }, + { IPv4(32,227,216,0),24 }, + { IPv4(32,227,217,0),24 }, + { IPv4(32,227,218,0),24 }, + { IPv4(32,227,219,0),24 }, + { IPv4(32,227,220,0),24 }, + { IPv4(32,227,233,0),24 }, + { IPv4(32,227,234,0),24 }, + { IPv4(32,227,235,0),24 }, + { IPv4(32,227,236,0),24 }, + { IPv4(32,227,237,0),24 }, + { IPv4(32,227,238,0),24 }, + { IPv4(32,228,128,0),19 }, + { IPv4(32,229,0,0),18 }, + { IPv4(32,229,64,0),18 }, + { IPv4(32,229,128,0),18 }, + { IPv4(32,229,192,0),18 }, + { IPv4(33,0,0,0),8 }, + { IPv4(35,35,96,0),20 }, + { IPv4(35,35,144,0),20 }, + { IPv4(35,35,176,0),20 }, 
+ { IPv4(38,156,161,0),24 }, + { IPv4(38,195,234,0),24 }, + { IPv4(38,233,177,0),24 }, + { IPv4(38,241,180,0),24 }, + { IPv4(38,241,183,0),24 }, + { IPv4(40,0,96,0),22 }, + { IPv4(44,0,0,0),11 }, + { IPv4(44,4,129,0),24 }, + { IPv4(44,32,0,0),13 }, + { IPv4(44,40,0,0),14 }, + { IPv4(44,46,0,0),15 }, + { IPv4(44,48,0,0),12 }, + { IPv4(44,64,0,0),10 }, + { IPv4(44,128,0,0),9 }, + { IPv4(44,166,0,0),16 }, + { IPv4(47,8,0,0),14 }, + { IPv4(47,46,0,0),15 }, + { IPv4(47,46,48,0),20 }, + { IPv4(47,46,160,0),19 }, + { IPv4(47,46,192,0),20 }, + { IPv4(47,46,208,0),20 }, + { IPv4(47,46,234,0),23 }, + { IPv4(47,47,224,0),21 }, + { IPv4(47,47,240,0),21 }, + { IPv4(47,153,64,0),18 }, + { IPv4(47,153,128,0),18 }, + { IPv4(47,249,0,0),16 }, + { IPv4(47,249,128,0),17 }, + { IPv4(53,244,0,0),19 }, + { IPv4(55,0,0,0),8 }, + { IPv4(56,0,64,0),19 }, + { IPv4(56,0,128,0),18 }, + { IPv4(56,0,128,0),19 }, + { IPv4(56,0,160,0),19 }, + { IPv4(61,6,0,0),17 }, + { IPv4(61,6,128,0),18 }, + { IPv4(61,8,0,0),19 }, + { IPv4(61,8,30,0),24 }, + { IPv4(61,8,96,0),19 }, + { IPv4(61,8,242,0),24 }, + { IPv4(61,8,243,0),24 }, + { IPv4(61,8,244,0),24 }, + { IPv4(61,8,245,0),24 }, + { IPv4(61,8,246,0),24 }, + { IPv4(61,8,247,0),24 }, + { IPv4(61,8,248,0),24 }, + { IPv4(61,8,249,0),24 }, + { IPv4(61,8,250,0),24 }, + { IPv4(61,8,251,0),24 }, + { IPv4(61,9,0,0),17 }, + { IPv4(61,9,73,0),24 }, + { IPv4(61,9,74,0),24 }, + { IPv4(61,9,75,0),24 }, + { IPv4(61,9,76,0),24 }, + { IPv4(61,9,77,0),24 }, + { IPv4(61,9,78,0),24 }, + { IPv4(61,9,112,0),24 }, + { IPv4(61,9,126,0),24 }, + { IPv4(61,10,0,0),17 }, + { IPv4(61,10,128,0),17 }, + { IPv4(61,11,0,0),19 }, + { IPv4(61,11,12,0),22 }, + { IPv4(61,11,24,0),21 }, + { IPv4(61,11,32,0),20 }, + { IPv4(61,11,36,0),22 }, + { IPv4(61,11,48,0),21 }, + { IPv4(61,13,0,0),16 }, + { IPv4(61,14,32,0),22 }, + { IPv4(61,15,0,0),17 }, + { IPv4(61,16,0,0),17 }, + { IPv4(61,18,0,0),18 }, + { IPv4(61,18,0,0),17 }, + { IPv4(61,18,64,0),18 }, + { IPv4(61,18,128,0),17 }, + { IPv4(61,18,128,0),18 }, + { IPv4(61,18,192,0),18 }, + { IPv4(61,20,0,0),16 }, + { IPv4(61,30,0,0),19 }, + { IPv4(61,30,0,0),16 }, + { IPv4(61,30,64,0),19 }, + { IPv4(61,30,128,0),20 }, + { IPv4(61,30,144,0),21 }, + { IPv4(61,30,176,0),20 }, + { IPv4(61,30,192,0),21 }, + { IPv4(61,32,0,0),13 }, + { IPv4(61,33,241,0),24 }, + { IPv4(61,33,244,0),24 }, + { IPv4(61,37,254,0),24 }, + { IPv4(61,40,0,0),14 }, + { IPv4(61,48,0,0),16 }, + { IPv4(61,56,192,0),19 }, + { IPv4(61,56,224,0),19 }, + { IPv4(61,57,128,0),20 }, + { IPv4(61,58,128,0),19 }, + { IPv4(61,59,0,0),16 }, + { IPv4(61,59,0,0),19 }, + { IPv4(61,59,0,0),18 }, + { IPv4(61,59,64,0),18 }, + { IPv4(61,59,128,0),18 }, + { IPv4(61,59,192,0),18 }, + { IPv4(61,60,0,0),19 }, + { IPv4(61,61,0,0),21 }, + { IPv4(61,61,8,0),21 }, + { IPv4(61,61,16,0),21 }, + { IPv4(61,61,24,0),21 }, + { IPv4(61,61,32,0),20 }, + { IPv4(61,61,48,0),21 }, + { IPv4(61,61,48,0),20 }, + { IPv4(61,61,56,0),21 }, + { IPv4(61,68,0,0),15 }, + { IPv4(61,70,0,0),16 }, + { IPv4(61,71,0,0),17 }, + { IPv4(61,72,0,0),13 }, + { IPv4(61,72,102,0),23 }, + { IPv4(61,72,104,0),21 }, + { IPv4(61,73,64,0),24 }, + { IPv4(61,73,152,0),24 }, + { IPv4(61,78,50,0),24 }, + { IPv4(61,78,74,0),24 }, + { IPv4(61,78,126,0),24 }, + { IPv4(61,78,127,0),24 }, + { IPv4(61,78,128,0),24 }, + { IPv4(61,80,0,0),14 }, + { IPv4(61,84,0,0),15 }, + { IPv4(61,96,0,0),17 }, + { IPv4(61,96,20,0),22 }, + { IPv4(61,96,66,0),23 }, + { IPv4(61,96,68,0),22 }, + { IPv4(61,96,72,0),22 }, + { IPv4(61,96,96,0),21 }, + { IPv4(61,96,108,0),22 }, + { IPv4(61,96,116,0),22 }, 
+ { IPv4(61,96,124,0),22 }, + { IPv4(61,114,64,0),20 }, + { IPv4(61,114,80,0),20 }, + { IPv4(61,114,128,0),19 }, + { IPv4(61,115,208,0),20 }, + { IPv4(61,115,240,0),20 }, + { IPv4(61,117,0,0),17 }, + { IPv4(61,120,0,0),17 }, + { IPv4(61,120,144,0),20 }, + { IPv4(61,120,192,0),20 }, + { IPv4(61,121,224,0),20 }, + { IPv4(61,122,48,0),20 }, + { IPv4(61,122,128,0),18 }, + { IPv4(61,122,208,0),20 }, + { IPv4(61,122,240,0),20 }, + { IPv4(61,125,160,0),20 }, + { IPv4(61,128,96,0),19 }, + { IPv4(61,128,128,0),17 }, + { IPv4(61,129,0,0),16 }, + { IPv4(61,130,0,0),17 }, + { IPv4(61,130,128,0),17 }, + { IPv4(61,131,0,0),17 }, + { IPv4(61,131,128,0),17 }, + { IPv4(61,132,0,0),17 }, + { IPv4(61,132,128,0),17 }, + { IPv4(61,133,0,0),17 }, + { IPv4(61,133,128,0),18 }, + { IPv4(61,133,192,0),19 }, + { IPv4(61,133,224,0),19 }, + { IPv4(61,134,0,0),18 }, + { IPv4(61,134,128,0),18 }, + { IPv4(61,134,192,0),18 }, + { IPv4(61,135,0,0),17 }, + { IPv4(61,135,128,0),19 }, + { IPv4(61,136,0,0),18 }, + { IPv4(61,136,64,0),18 }, + { IPv4(61,136,128,0),18 }, + { IPv4(61,137,0,0),17 }, + { IPv4(61,137,128,0),17 }, + { IPv4(61,138,0,0),18 }, + { IPv4(61,138,64,0),18 }, + { IPv4(61,138,128,0),18 }, + { IPv4(61,138,192,0),19 }, + { IPv4(61,138,224,0),19 }, + { IPv4(61,139,0,0),17 }, + { IPv4(61,139,128,0),18 }, + { IPv4(61,139,128,0),17 }, + { IPv4(61,139,192,0),18 }, + { IPv4(61,140,0,0),14 }, + { IPv4(61,144,0,0),15 }, + { IPv4(61,146,0,0),16 }, + { IPv4(61,147,0,0),16 }, + { IPv4(61,148,0,0),15 }, + { IPv4(61,150,0,0),17 }, + { IPv4(61,150,128,0),17 }, + { IPv4(61,151,0,0),16 }, + { IPv4(61,152,0,0),16 }, + { IPv4(61,153,0,0),16 }, + { IPv4(61,154,0,0),16 }, + { IPv4(61,155,0,0),16 }, + { IPv4(61,156,0,0),16 }, + { IPv4(61,157,0,0),16 }, + { IPv4(61,158,0,0),17 }, + { IPv4(61,158,128,0),17 }, + { IPv4(61,159,0,0),18 }, + { IPv4(61,159,64,0),18 }, + { IPv4(61,159,128,0),18 }, + { IPv4(61,159,192,0),18 }, + { IPv4(61,160,0,0),16 }, + { IPv4(61,161,0,0),18 }, + { IPv4(61,161,128,0),17 }, + { IPv4(61,163,0,0),16 }, + { IPv4(61,164,0,0),16 }, + { IPv4(61,165,0,0),16 }, + { IPv4(61,166,0,0),16 }, + { IPv4(61,167,0,0),17 }, + { IPv4(61,167,0,0),16 }, + { IPv4(61,167,128,0),17 }, + { IPv4(61,168,0,0),16 }, + { IPv4(61,169,0,0),16 }, + { IPv4(61,170,0,0),16 }, + { IPv4(61,171,0,0),16 }, + { IPv4(61,172,0,0),16 }, + { IPv4(61,172,0,0),15 }, + { IPv4(61,173,0,0),16 }, + { IPv4(61,174,0,0),16 }, + { IPv4(61,175,0,0),16 }, + { IPv4(61,176,0,0),16 }, + { IPv4(61,177,0,0),16 }, + { IPv4(61,178,0,0),16 }, + { IPv4(61,179,0,0),16 }, + { IPv4(61,180,0,0),17 }, + { IPv4(61,180,128,0),17 }, + { IPv4(61,181,0,0),16 }, + { IPv4(61,182,0,0),16 }, + { IPv4(61,183,0,0),16 }, + { IPv4(61,184,0,0),16 }, + { IPv4(61,185,0,0),16 }, + { IPv4(61,186,0,0),17 }, + { IPv4(61,186,64,0),18 }, + { IPv4(61,186,128,0),17 }, + { IPv4(61,187,0,0),16 }, + { IPv4(61,188,0,0),16 }, + { IPv4(61,189,0,0),17 }, + { IPv4(61,189,128,0),17 }, + { IPv4(61,190,0,0),16 }, + { IPv4(61,193,0,0),17 }, + { IPv4(61,193,144,0),20 }, + { IPv4(61,195,48,0),21 }, + { IPv4(61,195,64,0),20 }, + { IPv4(61,195,96,0),19 }, + { IPv4(61,195,128,0),20 }, + { IPv4(61,195,224,0),20 }, + { IPv4(61,198,16,0),20 }, + { IPv4(61,198,64,0),19 }, + { IPv4(61,198,128,0),17 }, + { IPv4(61,200,80,0),20 }, + { IPv4(61,200,128,0),17 }, + { IPv4(61,202,0,0),17 }, + { IPv4(61,202,128,0),18 }, + { IPv4(61,203,0,0),17 }, + { IPv4(61,203,176,0),20 }, + { IPv4(61,203,192,0),19 }, + { IPv4(61,204,0,0),17 }, + { IPv4(61,205,0,0),20 }, + { IPv4(61,205,64,0),20 }, + { IPv4(61,205,80,0),20 }, + { 
IPv4(61,205,96,0),20 }, + { IPv4(61,205,112,0),20 }, + { IPv4(61,206,0,0),20 }, + { IPv4(61,206,96,0),20 }, + { IPv4(61,206,112,0),20 }, + { IPv4(61,206,224,0),20 }, + { IPv4(61,211,128,0),20 }, + { IPv4(61,211,128,0),23 }, + { IPv4(61,211,130,0),24 }, + { IPv4(61,211,176,0),20 }, + { IPv4(61,213,128,0),20 }, + { IPv4(61,213,144,0),20 }, + { IPv4(61,213,160,0),19 }, + { IPv4(61,213,192,0),21 }, + { IPv4(61,213,208,0),20 }, + { IPv4(61,213,240,0),20 }, + { IPv4(61,215,176,0),20 }, + { IPv4(61,215,208,0),22 }, + { IPv4(61,215,240,0),20 }, + { IPv4(61,216,0,0),18 }, + { IPv4(61,216,64,0),18 }, + { IPv4(61,217,0,0),16 }, + { IPv4(61,220,0,0),16 }, + { IPv4(61,226,0,0),16 }, + { IPv4(61,227,0,0),16 }, + { IPv4(61,248,0,0),17 }, + { IPv4(61,248,0,0),16 }, + { IPv4(61,248,128,0),17 }, + { IPv4(61,250,0,0),18 }, + { IPv4(61,250,128,0),18 }, + { IPv4(61,251,0,0),20 }, + { IPv4(61,251,48,0),20 }, + { IPv4(61,251,128,0),20 }, + { IPv4(61,251,144,0),20 }, + { IPv4(61,251,160,0),20 }, + { IPv4(61,251,224,0),19 }, + { IPv4(61,252,0,0),21 }, + { IPv4(61,252,8,0),22 }, + { IPv4(61,252,12,0),23 }, + { IPv4(61,252,14,0),23 }, + { IPv4(61,252,32,0),19 }, + { IPv4(61,252,128,0),19 }, + { IPv4(61,252,192,0),19 }, + { IPv4(61,253,0,0),17 }, + { IPv4(61,254,0,0),15 }, + { IPv4(62,1,0,0),16 }, + { IPv4(62,2,0,0),16 }, + { IPv4(62,3,0,0),19 }, + { IPv4(62,4,64,0),19 }, + { IPv4(62,5,0,0),17 }, + { IPv4(62,6,0,0),16 }, + { IPv4(62,7,0,0),16 }, + { IPv4(62,8,0,0),19 }, + { IPv4(62,8,10,0),24 }, + { IPv4(62,8,10,0),23 }, + { IPv4(62,8,11,0),24 }, + { IPv4(62,12,0,0),19 }, + { IPv4(62,13,192,0),19 }, + { IPv4(62,14,0,0),15 }, + { IPv4(62,28,0,0),19 }, + { IPv4(62,29,0,0),17 }, + { IPv4(62,30,0,0),15 }, + { IPv4(62,38,0,0),16 }, + { IPv4(62,40,128,0),17 }, + { IPv4(62,42,0,0),16 }, + { IPv4(62,48,0,0),19 }, + { IPv4(62,48,64,0),19 }, + { IPv4(62,48,96,0),19 }, + { IPv4(62,49,0,0),16 }, + { IPv4(62,56,0,0),17 }, + { IPv4(62,58,0,0),15 }, + { IPv4(62,72,64,0),19 }, + { IPv4(62,74,0,0),21 }, + { IPv4(62,74,12,0),22 }, + { IPv4(62,74,16,0),20 }, + { IPv4(62,74,32,0),19 }, + { IPv4(62,74,64,0),18 }, + { IPv4(62,74,128,0),18 }, + { IPv4(62,74,192,0),19 }, + { IPv4(62,74,240,0),20 }, + { IPv4(62,77,0,0),19 }, + { IPv4(62,80,64,0),20 }, + { IPv4(62,80,80,0),20 }, + { IPv4(62,97,145,0),24 }, + { IPv4(62,100,0,0),18 }, + { IPv4(62,102,0,0),17 }, + { IPv4(62,103,0,0),16 }, + { IPv4(62,104,56,0),24 }, + { IPv4(62,104,174,0),24 }, + { IPv4(62,108,64,0),19 }, + { IPv4(62,108,192,0),19 }, + { IPv4(62,111,0,0),17 }, + { IPv4(62,113,0,0),19 }, + { IPv4(62,116,128,0),19 }, + { IPv4(62,128,192,0),20 }, + { IPv4(62,128,208,0),20 }, + { IPv4(62,129,128,0),19 }, + { IPv4(62,131,0,0),16 }, + { IPv4(62,134,0,0),16 }, + { IPv4(62,151,0,0),19 }, + { IPv4(62,151,32,0),19 }, + { IPv4(62,151,64,0),18 }, + { IPv4(62,152,128,0),19 }, + { IPv4(62,166,0,0),16 }, + { IPv4(62,168,128,0),19 }, + { IPv4(62,170,0,0),15 }, + { IPv4(62,172,0,0),21 }, + { IPv4(62,172,0,0),16 }, + { IPv4(62,172,4,0),22 }, + { IPv4(62,180,0,0),16 }, + { IPv4(62,185,204,0),24 }, + { IPv4(62,186,35,0),24 }, + { IPv4(62,186,236,0),24 }, + { IPv4(62,192,0,0),19 }, + { IPv4(62,200,0,0),16 }, + { IPv4(62,204,96,0),19 }, + { IPv4(62,205,0,0),19 }, + { IPv4(62,212,128,0),19 }, + { IPv4(62,215,0,0),16 }, + { IPv4(62,216,192,0),22 }, + { IPv4(62,229,128,0),20 }, + { IPv4(62,229,130,0),24 }, + { IPv4(62,229,132,0),24 }, + { IPv4(62,232,0,0),16 }, + { IPv4(62,232,20,0),24 }, + { IPv4(62,232,21,0),24 }, + { IPv4(62,232,22,0),24 }, + { IPv4(62,232,46,0),24 }, + { IPv4(62,232,72,0),24 }, + { 
IPv4(62,233,0,0),19 }, + { IPv4(62,238,0,0),16 }, + { IPv4(62,250,0,0),16 }, + { IPv4(62,251,0,0),17 }, + { IPv4(62,252,0,0),17 }, + { IPv4(62,252,0,0),14 }, + { IPv4(62,252,0,0),16 }, + { IPv4(62,252,128,0),17 }, + { IPv4(62,253,128,0),17 }, + { IPv4(62,254,0,0),16 }, + { IPv4(62,254,128,0),17 }, + { IPv4(62,255,128,0),17 }, + { IPv4(63,64,59,0),24 }, + { IPv4(63,64,126,0),24 }, + { IPv4(63,64,130,0),23 }, + { IPv4(63,64,228,0),23 }, + { IPv4(63,64,247,0),24 }, + { IPv4(63,64,254,0),23 }, + { IPv4(63,65,84,0),23 }, + { IPv4(63,65,127,0),24 }, + { IPv4(63,65,176,0),22 }, + { IPv4(63,65,221,0),24 }, + { IPv4(63,65,236,0),22 }, + { IPv4(63,65,248,0),22 }, + { IPv4(63,66,112,0),24 }, + { IPv4(63,66,113,0),24 }, + { IPv4(63,66,240,0),24 }, + { IPv4(63,66,246,0),24 }, + { IPv4(63,67,32,0),24 }, + { IPv4(63,67,73,0),24 }, + { IPv4(63,67,116,0),23 }, + { IPv4(63,67,188,0),24 }, + { IPv4(63,67,196,0),24 }, + { IPv4(63,67,205,0),24 }, + { IPv4(63,68,54,0),23 }, + { IPv4(63,68,112,0),24 }, + { IPv4(63,68,218,0),23 }, + { IPv4(63,69,98,0),23 }, + { IPv4(63,69,114,0),23 }, + { IPv4(63,69,228,0),22 }, + { IPv4(63,69,230,0),24 }, + { IPv4(63,69,231,0),24 }, + { IPv4(63,69,248,0),21 }, + { IPv4(63,70,161,0),24 }, + { IPv4(63,70,164,0),23 }, + { IPv4(63,70,212,0),24 }, + { IPv4(63,71,3,0),24 }, + { IPv4(63,71,94,0),23 }, + { IPv4(63,71,166,0),23 }, + { IPv4(63,72,61,0),24 }, + { IPv4(63,72,216,0),24 }, + { IPv4(63,73,1,0),24 }, + { IPv4(63,73,4,0),22 }, + { IPv4(63,73,10,0),24 }, + { IPv4(63,73,11,0),24 }, + { IPv4(63,73,12,0),24 }, + { IPv4(63,73,58,0),24 }, + { IPv4(63,73,70,0),24 }, + { IPv4(63,73,78,0),24 }, + { IPv4(63,73,123,0),24 }, + { IPv4(63,73,130,0),23 }, + { IPv4(63,73,136,0),22 }, + { IPv4(63,73,169,0),24 }, + { IPv4(63,73,182,0),24 }, + { IPv4(63,73,204,0),24 }, + { IPv4(63,73,224,0),22 }, + { IPv4(63,73,225,0),24 }, + { IPv4(63,73,227,0),24 }, + { IPv4(63,73,238,0),24 }, + { IPv4(63,73,240,0),21 }, + { IPv4(63,74,32,0),23 }, + { IPv4(63,74,89,0),24 }, + { IPv4(63,74,160,0),24 }, + { IPv4(63,74,163,0),24 }, + { IPv4(63,74,226,0),24 }, + { IPv4(63,75,68,0),23 }, + { IPv4(63,75,74,0),24 }, + { IPv4(63,75,78,0),24 }, + { IPv4(63,75,79,0),24 }, + { IPv4(63,75,91,0),24 }, + { IPv4(63,75,167,0),24 }, + { IPv4(63,75,194,0),24 }, + { IPv4(63,76,98,0),24 }, + { IPv4(63,76,137,0),24 }, + { IPv4(63,76,243,0),24 }, + { IPv4(63,76,244,0),24 }, + { IPv4(63,76,245,0),24 }, + { IPv4(63,77,90,0),24 }, + { IPv4(63,78,12,0),22 }, + { IPv4(63,78,137,0),24 }, + { IPv4(63,79,29,0),24 }, + { IPv4(63,79,104,0),24 }, + { IPv4(63,79,105,0),24 }, + { IPv4(63,79,122,0),24 }, + { IPv4(63,79,128,0),21 }, + { IPv4(63,80,13,0),24 }, + { IPv4(63,80,45,0),24 }, + { IPv4(63,81,224,0),24 }, + { IPv4(63,81,227,0),24 }, + { IPv4(63,81,228,0),24 }, + { IPv4(63,81,231,0),24 }, + { IPv4(63,81,234,0),24 }, + { IPv4(63,81,236,0),24 }, + { IPv4(63,81,238,0),24 }, + { IPv4(63,81,239,0),24 }, + { IPv4(63,82,26,0),24 }, + { IPv4(63,82,40,0),22 }, + { IPv4(63,82,43,0),24 }, + { IPv4(63,82,44,0),23 }, + { IPv4(63,82,80,0),24 }, + { IPv4(63,82,241,0),24 }, + { IPv4(63,83,36,0),23 }, + { IPv4(63,83,95,0),24 }, + { IPv4(63,83,140,0),22 }, + { IPv4(63,83,208,0),20 }, + { IPv4(63,83,240,0),22 }, + { IPv4(63,83,244,0),22 }, + { IPv4(63,84,15,0),24 }, + { IPv4(63,84,62,0),24 }, + { IPv4(63,84,63,0),24 }, + { IPv4(63,84,72,0),22 }, + { IPv4(63,84,74,0),24 }, + { IPv4(63,84,122,0),24 }, + { IPv4(63,84,135,0),24 }, + { IPv4(63,84,140,0),22 }, + { IPv4(63,84,231,0),24 }, + { IPv4(63,85,19,0),24 }, + { IPv4(63,85,72,0),24 }, + { IPv4(63,85,181,0),24 
}, + { IPv4(63,85,212,0),24 }, + { IPv4(63,85,213,0),24 }, + { IPv4(63,86,126,0),24 }, + { IPv4(63,87,84,0),24 }, + { IPv4(63,87,170,0),23 }, + { IPv4(63,87,173,0),24 }, + { IPv4(63,87,220,0),23 }, + { IPv4(63,88,88,0),23 }, + { IPv4(63,88,172,0),24 }, + { IPv4(63,89,141,0),24 }, + { IPv4(63,89,167,0),24 }, + { IPv4(63,90,24,0),23 }, + { IPv4(63,90,66,0),23 }, + { IPv4(63,90,77,0),24 }, + { IPv4(63,90,79,0),24 }, + { IPv4(63,91,110,0),23 }, + { IPv4(63,91,145,0),24 }, + { IPv4(63,91,172,0),24 }, + { IPv4(63,91,173,0),24 }, + { IPv4(63,92,80,0),21 }, + { IPv4(63,92,88,0),22 }, + { IPv4(63,92,133,0),24 }, + { IPv4(63,92,172,0),24 }, + { IPv4(63,92,192,0),23 }, + { IPv4(63,92,194,0),24 }, + { IPv4(63,93,152,0),24 }, + { IPv4(63,93,196,0),24 }, + { IPv4(63,93,197,0),24 }, + { IPv4(63,93,203,0),24 }, + { IPv4(63,94,99,0),24 }, + { IPv4(63,94,105,0),24 }, + { IPv4(63,95,0,0),21 }, + { IPv4(63,95,86,0),24 }, + { IPv4(63,95,193,0),24 }, + { IPv4(63,95,216,0),24 }, + { IPv4(63,95,254,0),23 }, + { IPv4(63,96,60,0),24 }, + { IPv4(63,96,61,0),24 }, + { IPv4(63,96,62,0),24 }, + { IPv4(63,96,63,0),24 }, + { IPv4(63,97,1,0),24 }, + { IPv4(63,97,144,0),24 }, + { IPv4(63,97,145,0),24 }, + { IPv4(63,97,179,0),24 }, + { IPv4(63,97,180,0),22 }, + { IPv4(63,98,125,0),24 }, + { IPv4(63,98,127,0),24 }, + { IPv4(63,98,188,0),22 }, + { IPv4(63,99,9,0),24 }, + { IPv4(63,99,41,0),24 }, + { IPv4(63,99,120,0),22 }, + { IPv4(63,99,128,0),21 }, + { IPv4(63,99,152,0),23 }, + { IPv4(63,100,17,0),24 }, + { IPv4(63,100,108,0),24 }, + { IPv4(63,100,128,0),23 }, + { IPv4(63,100,130,0),23 }, + { IPv4(63,100,192,0),21 }, + { IPv4(63,100,195,0),24 }, + { IPv4(63,100,199,0),24 }, + { IPv4(63,100,200,0),22 }, + { IPv4(63,100,202,0),23 }, + { IPv4(63,100,204,0),24 }, + { IPv4(63,100,204,0),22 }, + { IPv4(63,100,205,0),24 }, + { IPv4(63,100,206,0),24 }, + { IPv4(63,100,207,0),24 }, + { IPv4(63,100,208,0),24 }, + { IPv4(63,100,209,0),24 }, + { IPv4(63,100,210,0),24 }, + { IPv4(63,100,211,0),24 }, + { IPv4(63,100,212,0),22 }, + { IPv4(63,100,216,0),22 }, + { IPv4(63,100,222,0),23 }, + { IPv4(63,101,54,0),23 }, + { IPv4(63,101,83,0),24 }, + { IPv4(63,101,150,0),23 }, + { IPv4(63,102,5,0),24 }, + { IPv4(63,102,48,0),23 }, + { IPv4(63,102,72,0),21 }, + { IPv4(63,102,192,0),22 }, + { IPv4(63,102,218,0),24 }, + { IPv4(63,102,224,0),22 }, + { IPv4(63,103,40,0),22 }, + { IPv4(63,103,83,0),24 }, + { IPv4(63,103,128,0),24 }, + { IPv4(63,103,129,0),24 }, + { IPv4(63,103,130,0),24 }, + { IPv4(63,103,132,0),23 }, + { IPv4(63,103,134,0),24 }, + { IPv4(63,103,135,0),24 }, + { IPv4(63,103,136,0),24 }, + { IPv4(63,103,137,0),24 }, + { IPv4(63,103,138,0),24 }, + { IPv4(63,103,139,0),24 }, + { IPv4(63,103,140,0),24 }, + { IPv4(63,103,141,0),24 }, + { IPv4(63,103,142,0),24 }, + { IPv4(63,103,143,0),24 }, + { IPv4(63,103,182,0),24 }, + { IPv4(63,103,202,0),24 }, + { IPv4(63,104,48,0),22 }, + { IPv4(63,104,84,0),22 }, + { IPv4(63,104,160,0),24 }, + { IPv4(63,104,192,0),21 }, + { IPv4(63,104,240,0),24 }, + { IPv4(63,104,243,0),24 }, + { IPv4(63,105,7,0),24 }, + { IPv4(63,105,100,0),24 }, + { IPv4(63,105,126,0),23 }, + { IPv4(63,105,192,0),20 }, + { IPv4(63,106,49,0),24 }, + { IPv4(63,106,156,0),23 }, + { IPv4(63,107,10,0),23 }, + { IPv4(63,107,112,0),24 }, + { IPv4(63,107,128,0),24 }, + { IPv4(63,107,135,0),24 }, + { IPv4(63,107,224,0),23 }, + { IPv4(63,108,88,0),21 }, + { IPv4(63,108,112,0),22 }, + { IPv4(63,108,116,0),24 }, + { IPv4(63,108,125,0),24 }, + { IPv4(63,108,133,0),24 }, + { IPv4(63,109,64,0),24 }, + { IPv4(63,109,65,0),24 }, + { 
IPv4(63,109,68,0),24 }, + { IPv4(63,109,71,0),24 }, + { IPv4(63,109,72,0),24 }, + { IPv4(63,109,75,0),24 }, + { IPv4(63,109,76,0),24 }, + { IPv4(63,109,77,0),24 }, + { IPv4(63,109,78,0),24 }, + { IPv4(63,109,79,0),24 }, + { IPv4(63,109,240,0),20 }, + { IPv4(63,110,83,0),24 }, + { IPv4(63,110,128,0),20 }, + { IPv4(63,110,160,0),21 }, + { IPv4(63,110,188,0),24 }, + { IPv4(63,112,144,0),24 }, + { IPv4(63,112,168,0),22 }, + { IPv4(63,113,38,0),23 }, + { IPv4(63,113,73,0),24 }, + { IPv4(63,113,80,0),20 }, + { IPv4(63,114,0,0),24 }, + { IPv4(63,114,74,0),23 }, + { IPv4(63,114,88,0),23 }, + { IPv4(63,114,195,0),24 }, + { IPv4(63,115,198,0),24 }, + { IPv4(63,117,40,0),21 }, + { IPv4(63,117,79,0),24 }, + { IPv4(63,117,116,0),24 }, + { IPv4(63,117,117,0),24 }, + { IPv4(63,117,118,0),24 }, + { IPv4(63,117,119,0),24 }, + { IPv4(63,118,66,0),24 }, + { IPv4(63,118,148,0),24 }, + { IPv4(63,118,152,0),23 }, + { IPv4(63,118,165,0),24 }, + { IPv4(63,118,246,0),24 }, + { IPv4(63,118,247,0),24 }, + { IPv4(63,120,80,0),24 }, + { IPv4(63,120,115,0),24 }, + { IPv4(63,120,127,0),24 }, + { IPv4(63,120,154,0),24 }, + { IPv4(63,121,1,0),24 }, + { IPv4(63,121,28,0),22 }, + { IPv4(63,121,58,0),24 }, + { IPv4(63,121,84,0),24 }, + { IPv4(63,121,111,0),24 }, + { IPv4(63,121,136,0),22 }, + { IPv4(63,121,144,0),23 }, + { IPv4(63,121,159,0),24 }, + { IPv4(63,122,8,0),24 }, + { IPv4(63,122,36,0),24 }, + { IPv4(63,122,152,0),24 }, + { IPv4(63,122,154,0),24 }, + { IPv4(63,122,155,0),24 }, + { IPv4(63,123,103,0),24 }, + { IPv4(63,124,17,0),24 }, + { IPv4(63,124,32,0),19 }, + { IPv4(63,124,124,0),24 }, + { IPv4(63,124,132,0),24 }, + { IPv4(63,125,6,0),23 }, + { IPv4(63,125,15,0),24 }, + { IPv4(63,125,162,0),23 }, + { IPv4(63,125,222,0),24 }, + { IPv4(63,125,226,0),24 }, + { IPv4(63,126,178,0),24 }, + { IPv4(63,126,208,0),21 }, + { IPv4(63,127,10,0),23 }, + { IPv4(63,127,192,0),21 }, + { IPv4(63,136,64,0),20 }, + { IPv4(63,136,80,0),22 }, + { IPv4(63,137,26,0),24 }, + { IPv4(63,137,252,0),22 }, + { IPv4(63,139,32,0),20 }, + { IPv4(63,140,55,0),24 }, + { IPv4(63,140,132,0),24 }, + { IPv4(63,140,134,0),24 }, + { IPv4(63,140,137,0),24 }, + { IPv4(63,144,15,0),24 }, + { IPv4(63,144,116,0),24 }, + { IPv4(63,144,220,0),24 }, + { IPv4(63,144,236,0),24 }, + { IPv4(63,145,47,0),24 }, + { IPv4(63,145,50,0),24 }, + { IPv4(63,145,61,0),24 }, + { IPv4(63,145,66,0),24 }, + { IPv4(63,145,71,0),24 }, + { IPv4(63,145,72,0),24 }, + { IPv4(63,145,73,0),24 }, + { IPv4(63,145,74,0),24 }, + { IPv4(63,145,76,0),24 }, + { IPv4(63,145,77,0),24 }, + { IPv4(63,145,79,0),24 }, + { IPv4(63,145,80,0),23 }, + { IPv4(63,145,167,0),24 }, + { IPv4(63,145,171,0),24 }, + { IPv4(63,145,192,0),24 }, + { IPv4(63,145,197,0),24 }, + { IPv4(63,145,199,0),24 }, + { IPv4(63,145,200,0),24 }, + { IPv4(63,145,203,0),24 }, + { IPv4(63,145,209,0),24 }, + { IPv4(63,145,210,0),24 }, + { IPv4(63,145,212,0),24 }, + { IPv4(63,145,215,0),24 }, + { IPv4(63,145,226,0),23 }, + { IPv4(63,146,36,0),24 }, + { IPv4(63,146,71,0),24 }, + { IPv4(63,146,93,0),24 }, + { IPv4(63,146,144,0),24 }, + { IPv4(63,146,152,0),24 }, + { IPv4(63,146,154,0),24 }, + { IPv4(63,146,237,0),24 }, + { IPv4(63,146,242,0),23 }, + { IPv4(63,146,252,0),24 }, + { IPv4(63,147,1,0),24 }, + { IPv4(63,147,4,0),24 }, + { IPv4(63,147,6,0),24 }, + { IPv4(63,147,32,0),20 }, + { IPv4(63,147,108,0),22 }, + { IPv4(63,147,128,0),21 }, + { IPv4(63,147,156,0),22 }, + { IPv4(63,147,196,0),24 }, + { IPv4(63,147,200,0),22 }, + { IPv4(63,148,39,0),24 }, + { IPv4(63,148,77,0),24 }, + { IPv4(63,148,93,0),24 }, + { IPv4(63,148,107,0),24 
}, + { IPv4(63,149,26,0),24 }, + { IPv4(63,149,28,0),24 }, + { IPv4(63,149,75,0),24 }, + { IPv4(63,149,98,0),24 }, + { IPv4(63,149,100,0),24 }, + { IPv4(63,149,102,0),24 }, + { IPv4(63,149,103,0),24 }, + { IPv4(63,149,113,0),24 }, + { IPv4(63,149,118,0),24 }, + { IPv4(63,149,121,0),24 }, + { IPv4(63,149,125,0),24 }, + { IPv4(63,149,126,0),24 }, + { IPv4(63,149,199,0),24 }, + { IPv4(63,149,232,0),24 }, + { IPv4(63,150,4,0),24 }, + { IPv4(63,150,7,0),24 }, + { IPv4(63,150,44,0),24 }, + { IPv4(63,150,69,0),24 }, + { IPv4(63,150,71,0),24 }, + { IPv4(63,150,72,0),22 }, + { IPv4(63,150,158,0),23 }, + { IPv4(63,150,158,0),24 }, + { IPv4(63,150,160,0),20 }, + { IPv4(63,150,164,0),24 }, + { IPv4(63,150,166,0),24 }, + { IPv4(63,150,167,0),24 }, + { IPv4(63,150,169,0),24 }, + { IPv4(63,150,173,0),24 }, + { IPv4(63,150,174,0),24 }, + { IPv4(63,150,175,0),24 }, + { IPv4(63,150,210,0),23 }, + { IPv4(63,150,213,0),24 }, + { IPv4(63,151,12,0),24 }, + { IPv4(63,151,14,0),24 }, + { IPv4(63,151,15,0),24 }, + { IPv4(63,151,32,0),21 }, + { IPv4(63,151,86,0),23 }, + { IPv4(63,151,137,0),24 }, + { IPv4(63,151,148,0),22 }, + { IPv4(63,151,155,0),24 }, + { IPv4(63,151,191,0),24 }, + { IPv4(63,151,220,0),22 }, + { IPv4(63,151,240,0),21 }, + { IPv4(63,160,32,0),21 }, + { IPv4(63,160,36,0),24 }, + { IPv4(63,160,129,0),24 }, + { IPv4(63,161,4,0),23 }, + { IPv4(63,161,14,0),24 }, + { IPv4(63,161,51,0),24 }, + { IPv4(63,161,73,0),24 }, + { IPv4(63,161,112,0),24 }, + { IPv4(63,161,204,0),22 }, + { IPv4(63,162,36,0),24 }, + { IPv4(63,162,253,0),24 }, + { IPv4(63,163,76,0),23 }, + { IPv4(63,163,160,0),19 }, + { IPv4(63,164,221,0),24 }, + { IPv4(63,165,90,0),24 }, + { IPv4(63,165,127,0),24 }, + { IPv4(63,165,191,0),24 }, + { IPv4(63,166,28,0),23 }, + { IPv4(63,166,30,0),24 }, + { IPv4(63,166,56,0),24 }, + { IPv4(63,166,100,0),24 }, + { IPv4(63,166,114,0),24 }, + { IPv4(63,166,116,0),22 }, + { IPv4(63,166,144,0),24 }, + { IPv4(63,166,226,0),24 }, + { IPv4(63,167,8,0),23 }, + { IPv4(63,167,44,0),22 }, + { IPv4(63,167,108,0),24 }, + { IPv4(63,167,126,0),24 }, + { IPv4(63,167,126,0),23 }, + { IPv4(63,167,127,0),24 }, + { IPv4(63,167,160,0),24 }, + { IPv4(63,167,204,0),24 }, + { IPv4(63,167,205,0),24 }, + { IPv4(63,167,206,0),24 }, + { IPv4(63,167,207,0),24 }, + { IPv4(63,167,208,0),20 }, + { IPv4(63,168,117,0),24 }, + { IPv4(63,168,244,0),23 }, + { IPv4(63,169,11,0),24 }, + { IPv4(63,169,100,0),24 }, + { IPv4(63,169,120,0),21 }, + { IPv4(63,169,132,0),24 }, + { IPv4(63,169,190,0),24 }, + { IPv4(63,170,14,0),24 }, + { IPv4(63,170,78,0),24 }, + { IPv4(63,170,208,0),24 }, + { IPv4(63,170,254,0),23 }, + { IPv4(63,171,3,0),24 }, + { IPv4(63,171,66,0),24 }, + { IPv4(63,171,98,0),23 }, + { IPv4(63,171,251,0),24 }, + { IPv4(63,172,2,0),24 }, + { IPv4(63,172,189,0),24 }, + { IPv4(63,173,76,0),23 }, + { IPv4(63,173,180,0),22 }, + { IPv4(63,174,16,0),20 }, + { IPv4(63,174,82,0),23 }, + { IPv4(63,174,120,0),21 }, + { IPv4(63,174,209,0),24 }, + { IPv4(63,175,32,0),20 }, + { IPv4(63,175,68,0),22 }, + { IPv4(63,175,96,0),24 }, + { IPv4(63,192,112,0),20 }, + { IPv4(63,192,141,0),24 }, + { IPv4(63,194,96,0),19 }, + { IPv4(63,196,192,0),20 }, + { IPv4(63,198,37,0),24 }, + { IPv4(63,201,0,0),20 }, + { IPv4(63,201,7,0),24 }, + { IPv4(63,201,12,0),22 }, + { IPv4(63,201,16,0),20 }, + { IPv4(63,201,154,0),24 }, + { IPv4(63,202,128,0),20 }, + { IPv4(63,202,144,0),20 }, + { IPv4(63,202,150,0),24 }, + { IPv4(63,202,152,0),22 }, + { IPv4(63,210,101,0),24 }, + { IPv4(63,210,255,0),24 }, + { IPv4(63,211,38,0),23 }, + { IPv4(63,214,242,0),24 }, + { 
IPv4(63,215,70,0),24 }, + { IPv4(63,221,60,0),24 }, + { IPv4(63,224,168,0),24 }, + { IPv4(63,224,189,0),24 }, + { IPv4(63,224,244,0),24 }, + { IPv4(63,225,13,0),24 }, + { IPv4(63,225,63,0),24 }, + { IPv4(63,226,73,0),24 }, + { IPv4(63,226,74,0),24 }, + { IPv4(63,226,75,0),24 }, + { IPv4(63,226,76,0),24 }, + { IPv4(63,226,110,0),23 }, + { IPv4(63,226,158,0),24 }, + { IPv4(63,226,166,0),24 }, + { IPv4(63,227,154,0),23 }, + { IPv4(63,227,188,0),24 }, + { IPv4(63,227,192,0),24 }, + { IPv4(63,228,26,0),24 }, + { IPv4(63,228,28,0),24 }, + { IPv4(63,228,156,0),23 }, + { IPv4(63,228,214,0),23 }, + { IPv4(63,228,220,0),22 }, + { IPv4(63,229,89,0),24 }, + { IPv4(63,229,90,0),24 }, + { IPv4(63,229,91,0),24 }, + { IPv4(63,229,92,0),24 }, + { IPv4(63,229,93,0),24 }, + { IPv4(63,229,94,0),24 }, + { IPv4(63,229,95,0),24 }, + { IPv4(63,229,96,0),24 }, + { IPv4(63,229,104,0),24 }, + { IPv4(63,229,108,0),24 }, + { IPv4(63,229,144,0),20 }, + { IPv4(63,229,182,0),24 }, + { IPv4(63,229,183,0),24 }, + { IPv4(63,230,115,0),24 }, + { IPv4(63,230,116,0),23 }, + { IPv4(63,230,176,0),22 }, + { IPv4(63,230,181,0),24 }, + { IPv4(63,230,182,0),23 }, + { IPv4(63,230,184,0),22 }, + { IPv4(63,230,240,0),23 }, + { IPv4(63,230,250,0),24 }, + { IPv4(63,232,123,0),24 }, + { IPv4(63,232,160,0),22 }, + { IPv4(63,232,164,0),22 }, + { IPv4(63,232,168,0),22 }, + { IPv4(63,232,172,0),22 }, + { IPv4(63,232,176,0),22 }, + { IPv4(63,232,180,0),24 }, + { IPv4(63,232,181,0),24 }, + { IPv4(63,232,183,0),24 }, + { IPv4(63,232,186,0),24 }, + { IPv4(63,232,187,0),24 }, + { IPv4(63,232,188,0),22 }, + { IPv4(63,233,196,0),24 }, + { IPv4(63,233,224,0),22 }, + { IPv4(63,234,56,0),22 }, + { IPv4(63,234,60,0),24 }, + { IPv4(63,236,76,0),23 }, + { IPv4(63,236,112,0),21 }, + { IPv4(63,236,120,0),24 }, + { IPv4(63,236,120,0),23 }, + { IPv4(63,236,142,0),23 }, + { IPv4(63,236,176,0),22 }, + { IPv4(63,236,184,0),22 }, + { IPv4(63,236,250,0),24 }, + { IPv4(63,237,39,0),24 }, + { IPv4(63,237,60,0),24 }, + { IPv4(63,237,80,0),23 }, + { IPv4(63,237,114,0),24 }, + { IPv4(63,237,116,0),24 }, + { IPv4(63,237,125,0),24 }, + { IPv4(63,237,126,0),24 }, + { IPv4(63,237,171,0),24 }, + { IPv4(63,237,186,0),24 }, + { IPv4(63,237,201,0),24 }, + { IPv4(63,237,220,0),24 }, + { IPv4(63,237,225,0),24 }, + { IPv4(63,237,226,0),24 }, + { IPv4(63,237,230,0),23 }, + { IPv4(63,237,233,0),24 }, + { IPv4(63,237,236,0),24 }, + { IPv4(63,237,238,0),24 }, + { IPv4(63,237,239,0),24 }, + { IPv4(63,237,244,0),24 }, + { IPv4(63,237,245,0),24 }, + { IPv4(63,237,246,0),24 }, + { IPv4(63,238,48,0),22 }, + { IPv4(63,238,70,0),24 }, + { IPv4(63,238,79,0),24 }, + { IPv4(63,238,96,0),22 }, + { IPv4(63,238,121,0),24 }, + { IPv4(63,238,128,0),22 }, + { IPv4(63,238,152,0),22 }, + { IPv4(63,238,156,0),23 }, + { IPv4(63,238,160,0),19 }, + { IPv4(63,238,215,0),24 }, + { IPv4(63,238,226,0),24 }, + { IPv4(63,238,230,0),24 }, + { IPv4(63,238,231,0),24 }, + { IPv4(63,239,2,0),24 }, + { IPv4(63,239,5,0),24 }, + { IPv4(63,239,6,0),24 }, + { IPv4(63,239,48,0),21 }, + { IPv4(63,239,60,0),22 }, + { IPv4(63,239,92,0),24 }, + { IPv4(63,239,102,0),24 }, + { IPv4(63,239,116,0),24 }, + { IPv4(63,239,144,0),24 }, + { IPv4(63,239,145,0),24 }, + { IPv4(63,239,148,0),24 }, + { IPv4(63,239,149,0),24 }, + { IPv4(63,239,150,0),24 }, + { IPv4(63,239,163,0),24 }, + { IPv4(63,239,199,0),24 }, + { IPv4(63,239,204,0),23 }, + { IPv4(63,239,204,0),24 }, + { IPv4(63,239,205,0),24 }, + { IPv4(63,239,211,0),24 }, + { IPv4(63,239,240,0),20 }, + { IPv4(63,240,0,0),15 }, + { IPv4(63,240,0,0),18 }, + { IPv4(63,240,4,0),24 }, + { 
IPv4(63,240,55,0),24 }, + { IPv4(63,240,64,0),19 }, + { IPv4(63,240,128,0),18 }, + { IPv4(63,240,192,0),19 }, + { IPv4(63,240,224,0),19 }, + { IPv4(63,241,0,0),18 }, + { IPv4(63,241,16,0),21 }, + { IPv4(63,241,44,0),23 }, + { IPv4(63,241,48,0),21 }, + { IPv4(63,241,59,0),24 }, + { IPv4(63,241,61,0),24 }, + { IPv4(63,241,62,0),24 }, + { IPv4(63,241,63,0),24 }, + { IPv4(63,241,64,0),19 }, + { IPv4(63,241,91,0),24 }, + { IPv4(63,241,128,0),18 }, + { IPv4(63,241,192,0),18 }, + { IPv4(63,242,0,0),16 }, + { IPv4(63,249,13,0),24 }, + { IPv4(63,249,14,0),23 }, + { IPv4(63,249,16,0),21 }, + { IPv4(63,249,64,0),19 }, + { IPv4(63,250,128,0),20 }, + { IPv4(63,250,144,0),24 }, + { IPv4(63,250,144,0),20 }, + { IPv4(63,250,145,0),24 }, + { IPv4(63,250,146,0),24 }, + { IPv4(63,250,147,0),24 }, + { IPv4(63,250,148,0),24 }, + { IPv4(63,250,150,0),24 }, + { IPv4(63,250,151,0),24 }, + { IPv4(63,250,152,0),24 }, + { IPv4(63,250,153,0),24 }, + { IPv4(63,250,154,0),24 }, + { IPv4(63,250,155,0),24 }, + { IPv4(63,250,156,0),24 }, + { IPv4(63,250,157,0),24 }, + { IPv4(63,250,158,0),24 }, + { IPv4(63,250,159,0),24 }, + { IPv4(63,250,160,0),20 }, + { IPv4(63,250,192,0),19 }, + { IPv4(63,251,0,0),20 }, + { IPv4(63,251,32,0),20 }, + { IPv4(63,251,33,0),24 }, + { IPv4(63,251,35,0),24 }, + { IPv4(63,251,36,0),24 }, + { IPv4(63,251,37,0),24 }, + { IPv4(63,251,40,0),21 }, + { IPv4(63,251,42,0),24 }, + { IPv4(63,251,44,0),23 }, + { IPv4(63,251,48,0),20 }, + { IPv4(63,251,49,0),24 }, + { IPv4(63,251,52,0),24 }, + { IPv4(63,251,60,0),24 }, + { IPv4(63,251,64,0),24 }, + { IPv4(63,251,64,0),20 }, + { IPv4(63,251,65,0),24 }, + { IPv4(63,251,75,0),24 }, + { IPv4(63,251,78,0),24 }, + { IPv4(63,251,80,0),20 }, + { IPv4(63,251,86,0),23 }, + { IPv4(63,251,93,0),24 }, + { IPv4(63,251,95,0),24 }, + { IPv4(63,251,96,0),24 }, + { IPv4(63,251,96,0),20 }, + { IPv4(63,251,106,0),24 }, + { IPv4(63,251,110,0),24 }, + { IPv4(63,251,112,0),20 }, + { IPv4(63,251,118,0),24 }, + { IPv4(63,251,121,0),24 }, + { IPv4(63,251,128,0),20 }, + { IPv4(63,251,140,0),24 }, + { IPv4(63,251,144,0),20 }, + { IPv4(63,251,156,0),24 }, + { IPv4(63,251,160,0),20 }, + { IPv4(63,251,174,0),24 }, + { IPv4(63,251,176,0),20 }, + { IPv4(63,251,192,0),24 }, + { IPv4(63,251,192,0),19 }, + { IPv4(63,251,203,0),24 }, + { IPv4(63,251,208,0),20 }, + { IPv4(63,251,208,0),21 }, + { IPv4(63,251,212,0),24 }, + { IPv4(63,251,213,0),24 }, + { IPv4(63,251,224,0),24 }, + { IPv4(63,251,224,0),19 }, + { IPv4(63,251,228,0),24 }, + { IPv4(63,251,233,0),24 }, + { IPv4(63,251,234,0),24 }, + { IPv4(63,251,239,0),24 }, + { IPv4(63,251,242,0),23 }, + { IPv4(63,251,247,0),24 }, + { IPv4(63,251,251,0),24 }, + { IPv4(64,0,0,0),14 }, + { IPv4(64,0,8,0),21 }, + { IPv4(64,0,25,0),24 }, + { IPv4(64,3,138,0),24 }, + { IPv4(64,3,139,0),24 }, + { IPv4(64,4,0,0),18 }, + { IPv4(64,4,128,0),19 }, + { IPv4(64,4,147,0),24 }, + { IPv4(64,4,148,0),23 }, + { IPv4(64,4,192,0),19 }, + { IPv4(64,5,71,0),24 }, + { IPv4(64,5,224,0),24 }, + { IPv4(64,5,225,0),24 }, + { IPv4(64,5,226,0),24 }, + { IPv4(64,6,64,0),20 }, + { IPv4(64,6,128,0),20 }, + { IPv4(64,6,144,0),20 }, + { IPv4(64,6,176,0),20 }, + { IPv4(64,7,64,0),19 }, + { IPv4(64,7,128,0),20 }, + { IPv4(64,8,128,0),18 }, + { IPv4(64,8,192,0),18 }, + { IPv4(64,9,52,0),24 }, + { IPv4(64,12,0,0),16 }, + { IPv4(64,12,0,0),20 }, + { IPv4(64,13,0,0),16 }, + { IPv4(64,13,64,0),20 }, + { IPv4(64,13,80,0),20 }, + { IPv4(64,14,9,0),24 }, + { IPv4(64,14,74,0),23 }, + { IPv4(64,14,136,0),24 }, + { IPv4(64,14,136,0),23 }, + { IPv4(64,15,162,0),24 }, + { IPv4(64,15,165,0),24 
}, + { IPv4(64,15,166,0),24 }, + { IPv4(64,15,194,0),23 }, + { IPv4(64,16,133,0),24 }, + { IPv4(64,16,136,0),24 }, + { IPv4(64,16,147,0),24 }, + { IPv4(64,16,160,0),24 }, + { IPv4(64,16,170,0),24 }, + { IPv4(64,16,173,0),24 }, + { IPv4(64,16,176,0),24 }, + { IPv4(64,16,180,0),24 }, + { IPv4(64,16,184,0),24 }, + { IPv4(64,16,189,0),24 }, + { IPv4(64,17,10,0),24 }, + { IPv4(64,17,19,0),24 }, + { IPv4(64,17,44,0),24 }, + { IPv4(64,17,59,0),24 }, + { IPv4(64,17,60,0),22 }, + { IPv4(64,17,208,0),20 }, + { IPv4(64,20,48,0),20 }, + { IPv4(64,21,0,0),17 }, + { IPv4(64,21,49,0),24 }, + { IPv4(64,21,56,0),23 }, + { IPv4(64,21,68,0),23 }, + { IPv4(64,21,79,0),24 }, + { IPv4(64,21,102,0),23 }, + { IPv4(64,21,128,0),18 }, + { IPv4(64,21,192,0),19 }, + { IPv4(64,22,132,0),22 }, + { IPv4(64,22,132,0),24 }, + { IPv4(64,22,136,0),24 }, + { IPv4(64,22,192,0),19 }, + { IPv4(64,23,217,0),24 }, + { IPv4(64,24,80,0),20 }, + { IPv4(64,24,112,0),21 }, + { IPv4(64,24,112,0),20 }, + { IPv4(64,24,120,0),21 }, + { IPv4(64,26,64,0),18 }, + { IPv4(64,26,128,0),18 }, + { IPv4(64,26,192,0),19 }, + { IPv4(64,26,224,0),19 }, + { IPv4(64,27,64,0),18 }, + { IPv4(64,28,0,0),19 }, + { IPv4(64,28,68,0),23 }, + { IPv4(64,28,144,0),20 }, + { IPv4(64,29,16,0),20 }, + { IPv4(64,29,32,0),20 }, + { IPv4(64,29,64,0),19 }, + { IPv4(64,29,64,0),20 }, + { IPv4(64,29,70,0),24 }, + { IPv4(64,29,71,0),24 }, + { IPv4(64,29,80,0),24 }, + { IPv4(64,29,87,0),24 }, + { IPv4(64,29,94,0),24 }, + { IPv4(64,29,96,0),20 }, + { IPv4(64,29,160,0),20 }, + { IPv4(64,29,168,0),22 }, + { IPv4(64,29,172,0),22 }, + { IPv4(64,29,224,0),20 }, + { IPv4(64,30,17,0),24 }, + { IPv4(64,30,26,0),24 }, + { IPv4(64,30,34,0),24 }, + { IPv4(64,30,128,0),19 }, + { IPv4(64,30,224,0),20 }, + { IPv4(64,31,0,0),19 }, + { IPv4(64,33,120,0),24 }, + { IPv4(64,33,128,0),18 }, + { IPv4(64,35,0,0),18 }, + { IPv4(64,35,0,0),17 }, + { IPv4(64,35,128,0),20 }, + { IPv4(64,35,172,0),24 }, + { IPv4(64,37,64,0),19 }, + { IPv4(64,37,96,0),19 }, + { IPv4(64,37,128,0),21 }, + { IPv4(64,37,144,0),20 }, + { IPv4(64,38,96,0),19 }, + { IPv4(64,38,128,0),18 }, + { IPv4(64,39,0,0),19 }, + { IPv4(64,39,96,0),20 }, + { IPv4(64,39,192,0),19 }, + { IPv4(64,40,0,0),20 }, + { IPv4(64,40,32,0),19 }, + { IPv4(64,40,96,0),20 }, + { IPv4(64,41,152,0),24 }, + { IPv4(64,41,152,0),21 }, + { IPv4(64,41,255,0),24 }, + { IPv4(64,42,0,0),17 }, + { IPv4(64,42,128,0),18 }, + { IPv4(64,43,0,0),16 }, + { IPv4(64,44,40,0),23 }, + { IPv4(64,45,128,0),19 }, + { IPv4(64,46,128,0),19 }, + { IPv4(64,46,160,0),20 }, + { IPv4(64,46,192,0),18 }, + { IPv4(64,48,0,0),16 }, + { IPv4(64,48,128,0),18 }, + { IPv4(64,48,190,0),24 }, + { IPv4(64,49,0,0),18 }, + { IPv4(64,49,128,0),18 }, + { IPv4(64,49,192,0),19 }, + { IPv4(64,50,0,0),17 }, + { IPv4(64,50,7,0),24 }, + { IPv4(64,50,8,0),22 }, + { IPv4(64,50,64,0),20 }, + { IPv4(64,50,97,0),24 }, + { IPv4(64,50,107,0),24 }, + { IPv4(64,50,124,0),24 }, + { IPv4(64,50,125,0),24 }, + { IPv4(64,50,128,0),19 }, + { IPv4(64,50,160,0),19 }, + { IPv4(64,50,192,0),19 }, + { IPv4(64,52,32,0),19 }, + { IPv4(64,52,64,0),20 }, + { IPv4(64,52,112,0),20 }, + { IPv4(64,52,192,0),19 }, + { IPv4(64,53,0,0),18 }, + { IPv4(64,53,64,0),18 }, + { IPv4(64,54,0,0),16 }, + { IPv4(64,55,0,0),16 }, + { IPv4(64,55,0,0),17 }, + { IPv4(64,55,128,0),17 }, + { IPv4(64,56,0,0),19 }, + { IPv4(64,56,96,0),20 }, + { IPv4(64,56,224,0),20 }, + { IPv4(64,57,0,0),20 }, + { IPv4(64,57,224,0),20 }, + { IPv4(64,58,128,0),19 }, + { IPv4(64,58,158,0),23 }, + { IPv4(64,58,160,0),19 }, + { IPv4(64,58,168,0),23 }, + { 
IPv4(64,58,185,0),24 }, + { IPv4(64,58,190,0),23 }, + { IPv4(64,59,0,0),23 }, + { IPv4(64,59,0,0),18 }, + { IPv4(64,59,10,0),23 }, + { IPv4(64,59,20,0),23 }, + { IPv4(64,59,128,0),19 }, + { IPv4(64,59,128,0),18 }, + { IPv4(64,59,224,0),19 }, + { IPv4(64,60,112,0),21 }, + { IPv4(64,60,120,0),21 }, + { IPv4(64,60,208,0),20 }, + { IPv4(64,60,224,0),20 }, + { IPv4(64,60,240,0),20 }, + { IPv4(64,61,29,0),24 }, + { IPv4(64,62,0,0),21 }, + { IPv4(64,62,0,0),17 }, + { IPv4(64,62,12,0),22 }, + { IPv4(64,62,94,0),24 }, + { IPv4(64,62,104,0),22 }, + { IPv4(64,62,112,0),24 }, + { IPv4(64,62,120,0),24 }, + { IPv4(64,62,125,0),24 }, + { IPv4(64,63,0,0),20 }, + { IPv4(64,63,32,0),19 }, + { IPv4(64,63,64,0),19 }, + { IPv4(64,63,112,0),20 }, + { IPv4(64,63,128,0),20 }, + { IPv4(64,63,176,0),20 }, + { IPv4(64,65,0,0),18 }, + { IPv4(64,66,28,0),24 }, + { IPv4(64,66,32,0),22 }, + { IPv4(64,66,32,0),20 }, + { IPv4(64,68,0,0),19 }, + { IPv4(64,68,32,0),19 }, + { IPv4(64,68,96,0),19 }, + { IPv4(64,68,102,0),23 }, + { IPv4(64,68,128,0),22 }, + { IPv4(64,68,192,0),20 }, + { IPv4(64,69,16,0),20 }, + { IPv4(64,69,64,0),19 }, + { IPv4(64,69,128,0),20 }, + { IPv4(64,69,208,0),20 }, + { IPv4(64,70,4,0),22 }, + { IPv4(64,70,68,0),22 }, + { IPv4(64,70,128,0),17 }, + { IPv4(64,71,64,0),22 }, + { IPv4(64,71,64,0),19 }, + { IPv4(64,71,128,0),18 }, + { IPv4(64,73,0,0),17 }, + { IPv4(64,73,128,0),18 }, + { IPv4(64,74,0,0),19 }, + { IPv4(64,74,5,0),24 }, + { IPv4(64,74,16,0),22 }, + { IPv4(64,74,32,0),19 }, + { IPv4(64,74,36,0),23 }, + { IPv4(64,74,38,0),24 }, + { IPv4(64,74,39,0),24 }, + { IPv4(64,74,44,0),24 }, + { IPv4(64,74,46,0),24 }, + { IPv4(64,74,46,0),23 }, + { IPv4(64,74,47,0),24 }, + { IPv4(64,74,63,0),24 }, + { IPv4(64,75,25,0),24 }, + { IPv4(64,75,26,0),24 }, + { IPv4(64,75,64,0),20 }, + { IPv4(64,75,111,0),24 }, + { IPv4(64,75,112,0),20 }, + { IPv4(64,75,128,0),18 }, + { IPv4(64,75,129,0),24 }, + { IPv4(64,75,133,0),24 }, + { IPv4(64,75,134,0),24 }, + { IPv4(64,75,144,0),22 }, + { IPv4(64,75,148,0),24 }, + { IPv4(64,75,149,0),24 }, + { IPv4(64,75,150,0),23 }, + { IPv4(64,75,152,0),23 }, + { IPv4(64,75,154,0),23 }, + { IPv4(64,75,156,0),23 }, + { IPv4(64,75,158,0),24 }, + { IPv4(64,75,168,0),24 }, + { IPv4(64,75,170,0),24 }, + { IPv4(64,75,176,0),22 }, + { IPv4(64,75,180,0),23 }, + { IPv4(64,75,182,0),23 }, + { IPv4(64,75,184,0),22 }, + { IPv4(64,75,188,0),24 }, + { IPv4(64,75,189,0),24 }, + { IPv4(64,76,68,0),22 }, + { IPv4(64,76,72,0),21 }, + { IPv4(64,76,152,0),24 }, + { IPv4(64,78,0,0),18 }, + { IPv4(64,78,64,0),19 }, + { IPv4(64,78,64,0),18 }, + { IPv4(64,79,0,0),19 }, + { IPv4(64,79,64,0),19 }, + { IPv4(64,79,96,0),20 }, + { IPv4(64,79,224,0),20 }, + { IPv4(64,80,0,0),16 }, + { IPv4(64,81,0,0),19 }, + { IPv4(64,81,32,0),20 }, + { IPv4(64,81,48,0),20 }, + { IPv4(64,81,64,0),20 }, + { IPv4(64,81,80,0),20 }, + { IPv4(64,81,96,0),20 }, + { IPv4(64,81,112,0),20 }, + { IPv4(64,81,128,0),21 }, + { IPv4(64,81,136,0),21 }, + { IPv4(64,81,144,0),20 }, + { IPv4(64,81,160,0),19 }, + { IPv4(64,81,176,0),23 }, + { IPv4(64,81,192,0),19 }, + { IPv4(64,81,224,0),21 }, + { IPv4(64,81,232,0),21 }, + { IPv4(64,81,240,0),20 }, + { IPv4(64,82,0,0),17 }, + { IPv4(64,83,160,0),20 }, + { IPv4(64,84,24,0),23 }, + { IPv4(64,84,26,0),24 }, + { IPv4(64,84,32,0),22 }, + { IPv4(64,84,41,0),24 }, + { IPv4(64,86,16,0),21 }, + { IPv4(64,86,224,0),24 }, + { IPv4(64,86,225,0),24 }, + { IPv4(64,86,253,0),24 }, + { IPv4(64,86,254,0),24 }, + { IPv4(64,87,64,0),19 }, + { IPv4(64,88,128,0),19 }, + { IPv4(64,89,96,0),19 }, + { IPv4(64,89,106,0),24 
}, + { IPv4(64,89,107,0),24 }, + { IPv4(64,89,110,0),23 }, + { IPv4(64,89,160,0),20 }, + { IPv4(64,89,224,0),20 }, + { IPv4(64,90,0,0),23 }, + { IPv4(64,90,0,0),19 }, + { IPv4(64,90,2,0),24 }, + { IPv4(64,90,3,0),24 }, + { IPv4(64,90,4,0),24 }, + { IPv4(64,90,5,0),24 }, + { IPv4(64,90,6,0),24 }, + { IPv4(64,90,7,0),24 }, + { IPv4(64,90,8,0),24 }, + { IPv4(64,90,9,0),24 }, + { IPv4(64,90,10,0),24 }, + { IPv4(64,90,12,0),24 }, + { IPv4(64,90,13,0),24 }, + { IPv4(64,90,14,0),24 }, + { IPv4(64,90,15,0),24 }, + { IPv4(64,90,16,0),24 }, + { IPv4(64,90,18,0),24 }, + { IPv4(64,90,19,0),24 }, + { IPv4(64,90,20,0),23 }, + { IPv4(64,90,22,0),24 }, + { IPv4(64,90,23,0),24 }, + { IPv4(64,90,24,0),23 }, + { IPv4(64,90,26,0),24 }, + { IPv4(64,90,27,0),24 }, + { IPv4(64,90,28,0),24 }, + { IPv4(64,90,29,0),24 }, + { IPv4(64,90,30,0),24 }, + { IPv4(64,90,31,0),24 }, + { IPv4(64,90,32,0),19 }, + { IPv4(64,90,64,0),20 }, + { IPv4(64,90,240,0),20 }, + { IPv4(64,91,224,0),20 }, + { IPv4(64,92,75,0),24 }, + { IPv4(64,94,0,0),20 }, + { IPv4(64,94,6,0),24 }, + { IPv4(64,94,13,0),24 }, + { IPv4(64,94,15,0),24 }, + { IPv4(64,94,16,0),20 }, + { IPv4(64,94,30,0),24 }, + { IPv4(64,94,31,0),24 }, + { IPv4(64,94,32,0),20 }, + { IPv4(64,94,48,0),22 }, + { IPv4(64,94,48,0),20 }, + { IPv4(64,94,49,0),24 }, + { IPv4(64,94,57,0),24 }, + { IPv4(64,94,58,0),24 }, + { IPv4(64,94,62,0),24 }, + { IPv4(64,94,64,0),19 }, + { IPv4(64,94,68,0),24 }, + { IPv4(64,94,70,0),24 }, + { IPv4(64,94,78,0),24 }, + { IPv4(64,94,81,0),24 }, + { IPv4(64,94,82,0),24 }, + { IPv4(64,94,83,0),24 }, + { IPv4(64,94,88,0),24 }, + { IPv4(64,94,89,0),24 }, + { IPv4(64,94,93,0),24 }, + { IPv4(64,94,94,0),24 }, + { IPv4(64,94,95,0),24 }, + { IPv4(64,94,96,0),23 }, + { IPv4(64,94,98,0),24 }, + { IPv4(64,94,99,0),24 }, + { IPv4(64,94,108,0),24 }, + { IPv4(64,94,112,0),20 }, + { IPv4(64,94,128,0),22 }, + { IPv4(64,94,128,0),20 }, + { IPv4(64,94,144,0),22 }, + { IPv4(64,94,144,0),20 }, + { IPv4(64,94,151,0),24 }, + { IPv4(64,94,152,0),22 }, + { IPv4(64,94,162,0),24 }, + { IPv4(64,94,170,0),24 }, + { IPv4(64,94,174,0),24 }, + { IPv4(64,94,175,0),24 }, + { IPv4(64,94,180,0),23 }, + { IPv4(64,94,182,0),24 }, + { IPv4(64,94,182,0),23 }, + { IPv4(64,94,188,0),23 }, + { IPv4(64,94,189,0),24 }, + { IPv4(64,94,199,0),24 }, + { IPv4(64,94,202,0),23 }, + { IPv4(64,94,202,0),24 }, + { IPv4(64,94,208,0),20 }, + { IPv4(64,94,214,0),23 }, + { IPv4(64,94,218,0),24 }, + { IPv4(64,94,223,0),24 }, + { IPv4(64,94,224,0),20 }, + { IPv4(64,94,224,0),21 }, + { IPv4(64,94,238,0),24 }, + { IPv4(64,94,240,0),20 }, + { IPv4(64,95,0,0),19 }, + { IPv4(64,95,9,0),24 }, + { IPv4(64,95,12,0),24 }, + { IPv4(64,95,18,0),24 }, + { IPv4(64,95,20,0),24 }, + { IPv4(64,95,26,0),24 }, + { IPv4(64,95,28,0),24 }, + { IPv4(64,95,29,0),24 }, + { IPv4(64,95,48,0),20 }, + { IPv4(64,95,64,0),20 }, + { IPv4(64,95,74,0),24 }, + { IPv4(64,95,80,0),20 }, + { IPv4(64,95,94,0),24 }, + { IPv4(64,95,95,0),24 }, + { IPv4(64,95,96,0),20 }, + { IPv4(64,95,96,0),21 }, + { IPv4(64,95,100,0),22 }, + { IPv4(64,95,112,0),20 }, + { IPv4(64,95,118,0),24 }, + { IPv4(64,95,119,0),24 }, + { IPv4(64,95,128,0),20 }, + { IPv4(64,95,160,0),19 }, + { IPv4(64,95,168,0),22 }, + { IPv4(64,95,172,0),23 }, + { IPv4(64,95,180,0),22 }, + { IPv4(64,95,189,0),24 }, + { IPv4(64,95,192,0),20 }, + { IPv4(64,95,208,0),20 }, + { IPv4(64,95,221,0),24 }, + { IPv4(64,95,222,0),24 }, + { IPv4(64,95,223,0),24 }, + { IPv4(64,95,224,0),24 }, + { IPv4(64,95,224,0),20 }, + { IPv4(64,95,225,0),24 }, + { IPv4(64,95,226,0),24 }, + { IPv4(64,95,227,0),24 }, + 
{ IPv4(64,95,238,0),24 }, + { IPv4(64,95,240,0),20 }, + { IPv4(64,100,0,0),14 }, + { IPv4(64,102,0,0),16 }, + { IPv4(64,104,0,0),16 }, + { IPv4(64,107,0,0),17 }, + { IPv4(64,107,128,0),17 }, + { IPv4(64,110,0,0),19 }, + { IPv4(64,110,15,0),24 }, + { IPv4(64,110,22,0),24 }, + { IPv4(64,110,24,0),22 }, + { IPv4(64,110,24,0),24 }, + { IPv4(64,110,27,0),24 }, + { IPv4(64,110,28,0),22 }, + { IPv4(64,110,29,0),24 }, + { IPv4(64,110,32,0),20 }, + { IPv4(64,110,36,0),23 }, + { IPv4(64,110,48,0),20 }, + { IPv4(64,110,48,0),21 }, + { IPv4(64,110,51,0),24 }, + { IPv4(64,110,54,0),23 }, + { IPv4(64,110,56,0),22 }, + { IPv4(64,110,60,0),22 }, + { IPv4(64,110,64,0),20 }, + { IPv4(64,110,75,0),24 }, + { IPv4(64,110,76,0),23 }, + { IPv4(64,110,79,0),24 }, + { IPv4(64,110,80,0),20 }, + { IPv4(64,110,96,0),20 }, + { IPv4(64,110,104,0),21 }, + { IPv4(64,110,112,0),20 }, + { IPv4(64,110,112,0),24 }, + { IPv4(64,110,113,0),24 }, + { IPv4(64,110,114,0),24 }, + { IPv4(64,110,115,0),24 }, + { IPv4(64,110,121,0),24 }, + { IPv4(64,110,122,0),24 }, + { IPv4(64,110,126,0),24 }, + { IPv4(64,110,128,0),20 }, + { IPv4(64,110,128,0),21 }, + { IPv4(64,110,133,0),24 }, + { IPv4(64,110,136,0),21 }, + { IPv4(64,110,144,0),20 }, + { IPv4(64,110,144,0),21 }, + { IPv4(64,110,148,0),23 }, + { IPv4(64,110,148,0),22 }, + { IPv4(64,110,150,0),23 }, + { IPv4(64,110,156,0),22 }, + { IPv4(64,110,160,0),20 }, + { IPv4(64,110,166,0),24 }, + { IPv4(64,110,176,0),20 }, + { IPv4(64,110,190,0),23 }, + { IPv4(64,111,48,0),20 }, + { IPv4(64,112,16,0),22 }, + { IPv4(64,112,64,0),21 }, + { IPv4(64,113,64,0),19 }, + { IPv4(64,113,192,0),19 }, + { IPv4(64,113,208,0),23 }, + { IPv4(64,115,0,0),19 }, + { IPv4(64,118,64,0),20 }, + { IPv4(64,118,96,0),21 }, + { IPv4(64,118,128,0),20 }, + { IPv4(64,118,130,0),24 }, + { IPv4(64,118,131,0),24 }, + { IPv4(64,118,140,0),24 }, + { IPv4(64,118,143,0),24 }, + { IPv4(64,119,32,0),20 }, + { IPv4(64,119,128,0),20 }, + { IPv4(64,119,160,0),20 }, + { IPv4(64,121,0,0),16 }, + { IPv4(64,122,0,0),19 }, + { IPv4(64,122,16,0),20 }, + { IPv4(64,122,64,0),20 }, + { IPv4(64,123,96,0),21 }, + { IPv4(64,123,195,0),24 }, + { IPv4(64,124,0,0),15 }, + { IPv4(64,124,6,0),24 }, + { IPv4(64,124,31,0),24 }, + { IPv4(64,124,37,0),24 }, + { IPv4(64,124,38,0),24 }, + { IPv4(64,124,41,0),24 }, + { IPv4(64,124,63,0),24 }, + { IPv4(64,124,70,0),24 }, + { IPv4(64,124,92,0),24 }, + { IPv4(64,124,106,0),23 }, + { IPv4(64,124,147,0),24 }, + { IPv4(64,124,148,0),24 }, + { IPv4(64,124,150,0),24 }, + { IPv4(64,124,152,0),24 }, + { IPv4(64,124,169,0),24 }, + { IPv4(64,124,212,0),24 }, + { IPv4(64,124,213,0),24 }, + { IPv4(64,124,236,0),23 }, + { IPv4(64,124,236,0),22 }, + { IPv4(64,124,239,0),24 }, + { IPv4(64,125,88,0),21 }, + { IPv4(64,125,132,0),22 }, + { IPv4(64,125,133,0),24 }, + { IPv4(64,125,134,0),24 }, + { IPv4(64,125,135,0),24 }, + { IPv4(64,125,140,0),24 }, + { IPv4(64,125,178,0),23 }, + { IPv4(64,125,179,0),24 }, + { IPv4(64,125,192,0),22 }, + { IPv4(64,125,248,0),22 }, + { IPv4(64,126,0,0),18 }, + { IPv4(64,127,0,0),18 }, + { IPv4(64,132,2,0),23 }, + { IPv4(64,132,14,0),24 }, + { IPv4(64,132,26,0),24 }, + { IPv4(64,132,83,0),24 }, + { IPv4(64,132,84,0),24 }, + { IPv4(64,134,12,0),24 }, + { IPv4(64,134,16,0),22 }, + { IPv4(64,134,20,0),23 }, + { IPv4(64,134,29,0),24 }, + { IPv4(64,134,126,0),24 }, + { IPv4(64,139,16,0),20 }, + { IPv4(64,139,32,0),20 }, + { IPv4(64,146,0,0),20 }, + { IPv4(64,146,9,0),24 }, + { IPv4(64,147,0,0),19 }, + { IPv4(64,147,192,0),20 }, + { IPv4(64,148,0,0),16 }, + { IPv4(64,148,224,0),20 }, + { 
IPv4(64,152,6,0),24 }, + { IPv4(64,152,7,0),24 }, + { IPv4(64,152,8,0),22 }, + { IPv4(64,152,12,0),24 }, + { IPv4(64,152,13,0),24 }, + { IPv4(64,152,20,0),24 }, + { IPv4(64,152,21,0),24 }, + { IPv4(64,152,108,0),24 }, + { IPv4(64,152,110,0),24 }, + { IPv4(64,152,111,0),24 }, + { IPv4(64,152,121,0),24 }, + { IPv4(64,152,176,0),21 }, + { IPv4(64,152,195,0),24 }, + { IPv4(64,154,10,0),23 }, + { IPv4(64,154,176,0),21 }, + { IPv4(64,154,194,0),23 }, + { IPv4(64,156,13,0),24 }, + { IPv4(64,156,44,0),23 }, + { IPv4(64,156,50,0),24 }, + { IPv4(64,156,180,0),23 }, + { IPv4(64,157,32,0),21 }, + { IPv4(64,157,129,0),24 }, + { IPv4(64,157,130,0),24 }, + { IPv4(64,157,131,0),24 }, + { IPv4(64,157,171,0),24 }, + { IPv4(64,157,232,0),22 }, + { IPv4(64,158,116,0),24 }, + { IPv4(64,158,118,0),24 }, + { IPv4(64,160,116,0),22 }, + { IPv4(64,161,32,0),20 }, + { IPv4(64,161,48,0),20 }, + { IPv4(64,161,121,0),24 }, + { IPv4(64,162,79,0),24 }, + { IPv4(64,162,99,0),24 }, + { IPv4(64,162,108,0),23 }, + { IPv4(64,162,222,0),24 }, + { IPv4(64,164,59,0),24 }, + { IPv4(64,164,232,0),24 }, + { IPv4(64,165,105,0),24 }, + { IPv4(64,166,160,0),20 }, + { IPv4(64,168,192,0),20 }, + { IPv4(64,169,0,0),20 }, + { IPv4(64,169,41,0),24 }, + { IPv4(64,173,192,0),20 }, + { IPv4(64,173,208,0),20 }, + { IPv4(64,178,0,0),18 }, + { IPv4(64,178,64,0),19 }, + { IPv4(64,181,1,0),24 }, + { IPv4(64,185,128,0),21 }, + { IPv4(64,185,136,0),22 }, + { IPv4(64,185,140,0),22 }, + { IPv4(64,185,140,0),23 }, + { IPv4(64,185,142,0),23 }, + { IPv4(64,185,144,0),20 }, + { IPv4(64,185,144,0),21 }, + { IPv4(64,185,152,0),21 }, + { IPv4(64,186,64,0),21 }, + { IPv4(64,186,72,0),22 }, + { IPv4(64,186,96,0),20 }, + { IPv4(64,186,128,0),20 }, + { IPv4(64,186,160,0),20 }, + { IPv4(64,186,232,0),22 }, + { IPv4(64,188,158,0),23 }, + { IPv4(64,200,0,0),22 }, + { IPv4(64,200,0,0),16 }, + { IPv4(64,200,4,0),22 }, + { IPv4(64,200,16,0),22 }, + { IPv4(64,200,32,0),21 }, + { IPv4(64,200,80,0),23 }, + { IPv4(64,200,82,0),24 }, + { IPv4(64,200,88,0),23 }, + { IPv4(64,200,90,0),23 }, + { IPv4(64,200,92,0),22 }, + { IPv4(64,200,96,0),23 }, + { IPv4(64,200,98,0),24 }, + { IPv4(64,200,99,0),24 }, + { IPv4(64,200,100,0),24 }, + { IPv4(64,200,104,0),23 }, + { IPv4(64,200,112,0),23 }, + { IPv4(64,200,136,0),23 }, + { IPv4(64,200,144,0),21 }, + { IPv4(64,200,144,0),24 }, + { IPv4(64,200,145,0),24 }, + { IPv4(64,200,170,0),23 }, + { IPv4(64,200,172,0),24 }, + { IPv4(64,200,173,0),24 }, + { IPv4(64,200,180,0),23 }, + { IPv4(64,200,184,0),23 }, + { IPv4(64,200,187,0),24 }, + { IPv4(64,200,188,0),23 }, + { IPv4(64,200,192,0),23 }, + { IPv4(64,200,212,0),24 }, + { IPv4(64,200,253,0),24 }, + { IPv4(64,208,52,0),23 }, + { IPv4(64,208,56,0),23 }, + { IPv4(64,208,186,0),23 }, + { IPv4(64,208,240,0),24 }, + { IPv4(64,209,32,0),23 }, + { IPv4(64,209,70,0),23 }, + { IPv4(64,209,92,0),24 }, + { IPv4(64,209,189,0),24 }, + { IPv4(64,209,201,0),24 }, + { IPv4(64,210,75,0),24 }, + { IPv4(64,210,178,0),24 }, + { IPv4(64,211,101,0),24 }, + { IPv4(64,211,184,0),21 }, + { IPv4(64,211,230,0),24 }, + { IPv4(64,212,8,0),21 }, + { IPv4(64,212,152,0),24 }, + { IPv4(64,212,170,0),24 }, + { IPv4(64,212,171,0),24 }, + { IPv4(64,213,66,0),23 }, + { IPv4(64,213,130,0),24 }, + { IPv4(64,214,85,0),24 }, + { IPv4(64,216,96,0),20 }, + { IPv4(64,217,32,0),20 }, + { IPv4(64,220,0,0),15 }, + { IPv4(64,220,201,0),24 }, + { IPv4(64,221,95,0),24 }, + { IPv4(64,221,168,0),21 }, + { IPv4(64,221,207,0),24 }, + { IPv4(64,221,223,0),24 }, + { IPv4(64,221,232,0),24 }, + { IPv4(64,224,0,0),17 }, + { IPv4(64,224,128,0),17 
}, + { IPv4(64,225,0,0),16 }, + { IPv4(64,226,0,0),16 }, + { IPv4(64,232,0,0),16 }, + { IPv4(64,232,0,0),22 }, + { IPv4(64,232,52,0),23 }, + { IPv4(64,232,88,0),24 }, + { IPv4(64,232,95,0),24 }, + { IPv4(64,232,116,0),23 }, + { IPv4(64,232,133,0),24 }, + { IPv4(64,232,138,0),24 }, + { IPv4(64,232,152,0),21 }, + { IPv4(64,232,160,0),20 }, + { IPv4(64,232,187,0),24 }, + { IPv4(64,232,196,0),24 }, + { IPv4(64,232,200,0),24 }, + { IPv4(64,232,206,0),24 }, + { IPv4(64,232,212,0),23 }, + { IPv4(64,232,252,0),22 }, + { IPv4(64,233,0,0),17 }, + { IPv4(64,233,8,0),21 }, + { IPv4(64,233,16,0),22 }, + { IPv4(64,236,0,0),16 }, + { IPv4(64,236,12,0),24 }, + { IPv4(64,236,16,0),21 }, + { IPv4(64,238,0,0),20 }, + { IPv4(64,238,128,0),20 }, + { IPv4(64,238,224,0),20 }, + { IPv4(64,239,0,0),18 }, + { IPv4(64,239,128,0),18 }, + { IPv4(64,240,69,0),24 }, + { IPv4(64,240,93,0),24 }, + { IPv4(64,241,64,0),24 }, + { IPv4(64,242,40,0),24 }, + { IPv4(64,242,41,0),24 }, + { IPv4(64,242,42,0),24 }, + { IPv4(64,242,43,0),24 }, + { IPv4(64,242,117,0),24 }, + { IPv4(64,242,118,0),23 }, + { IPv4(64,242,216,0),22 }, + { IPv4(64,242,250,0),23 }, + { IPv4(64,243,232,0),22 }, + { IPv4(64,244,80,0),21 }, + { IPv4(64,244,115,0),24 }, + { IPv4(64,244,120,0),21 }, + { IPv4(64,244,223,0),24 }, + { IPv4(64,245,48,0),20 }, + { IPv4(64,245,96,0),21 }, + { IPv4(64,245,224,0),21 }, + { IPv4(64,247,0,0),19 }, + { IPv4(64,250,128,0),18 }, + { IPv4(64,251,32,0),20 }, + { IPv4(64,251,64,0),20 }, + { IPv4(64,251,160,0),20 }, + { IPv4(64,251,240,0),20 }, + { IPv4(64,252,0,0),16 }, + { IPv4(64,252,224,0),19 }, + { IPv4(64,253,0,0),19 }, + { IPv4(64,253,9,0),24 }, + { IPv4(64,253,10,0),24 }, + { IPv4(64,253,32,0),19 }, + { IPv4(64,253,32,0),22 }, + { IPv4(64,253,36,0),23 }, + { IPv4(64,253,96,0),20 }, + { IPv4(64,254,32,0),19 }, + { IPv4(64,254,96,0),20 }, + { IPv4(64,254,96,0),24 }, + { IPv4(64,254,97,0),24 }, + { IPv4(64,254,98,0),24 }, + { IPv4(64,254,99,0),24 }, + { IPv4(64,254,100,0),24 }, + { IPv4(64,254,102,0),24 }, + { IPv4(64,254,103,0),24 }, + { IPv4(64,254,108,0),24 }, + { IPv4(64,254,160,0),20 }, + { IPv4(64,254,161,0),24 }, + { IPv4(64,254,163,0),24 }, + { IPv4(64,254,165,0),24 }, + { IPv4(64,254,166,0),24 }, + { IPv4(64,254,170,0),24 }, + { IPv4(64,254,172,0),23 }, + { IPv4(64,254,174,0),23 }, + { IPv4(64,255,64,0),19 }, + { IPv4(65,0,0,0),14 }, + { IPv4(65,0,0,0),17 }, + { IPv4(65,0,0,0),12 }, + { IPv4(65,0,0,0),13 }, + { IPv4(65,0,128,0),17 }, + { IPv4(65,1,0,0),17 }, + { IPv4(65,1,128,0),17 }, + { IPv4(65,2,0,0),17 }, + { IPv4(65,2,128,0),18 }, + { IPv4(65,2,192,0),19 }, + { IPv4(65,2,224,0),19 }, + { IPv4(65,3,0,0),18 }, + { IPv4(65,3,64,0),19 }, + { IPv4(65,3,96,0),19 }, + { IPv4(65,3,128,0),17 }, + { IPv4(65,3,192,0),19 }, + { IPv4(65,3,224,0),19 }, + { IPv4(65,4,0,0),16 }, + { IPv4(65,5,0,0),17 }, + { IPv4(65,5,128,0),17 }, + { IPv4(65,6,0,0),15 }, + { IPv4(65,8,0,0),17 }, + { IPv4(65,8,0,0),14 }, + { IPv4(65,8,128,0),17 }, + { IPv4(65,9,0,0),16 }, + { IPv4(65,10,0,0),17 }, + { IPv4(65,10,96,0),19 }, + { IPv4(65,10,128,0),18 }, + { IPv4(65,10,192,0),19 }, + { IPv4(65,10,224,0),19 }, + { IPv4(65,11,0,0),18 }, + { IPv4(65,11,64,0),19 }, + { IPv4(65,11,96,0),19 }, + { IPv4(65,11,128,0),18 }, + { IPv4(65,11,192,0),18 }, + { IPv4(65,12,0,0),17 }, + { IPv4(65,12,128,0),17 }, + { IPv4(65,13,0,0),17 }, + { IPv4(65,13,128,0),18 }, + { IPv4(65,13,192,0),18 }, + { IPv4(65,14,0,0),17 }, + { IPv4(65,14,128,0),17 }, + { IPv4(65,15,0,0),17 }, + { IPv4(65,15,128,0),17 }, + { IPv4(65,21,128,0),18 }, + { IPv4(65,24,0,0),17 }, + { 
IPv4(65,24,0,0),18 }, + { IPv4(65,24,64,0),18 }, + { IPv4(65,24,128,0),18 }, + { IPv4(65,24,192,0),18 }, + { IPv4(65,24,200,0),21 }, + { IPv4(65,24,208,0),21 }, + { IPv4(65,24,216,0),22 }, + { IPv4(65,24,236,0),22 }, + { IPv4(65,24,240,0),22 }, + { IPv4(65,25,0,0),17 }, + { IPv4(65,25,128,0),19 }, + { IPv4(65,25,160,0),19 }, + { IPv4(65,25,192,0),18 }, + { IPv4(65,26,0,0),17 }, + { IPv4(65,26,128,0),20 }, + { IPv4(65,26,144,0),20 }, + { IPv4(65,26,160,0),20 }, + { IPv4(65,26,176,0),20 }, + { IPv4(65,26,192,0),18 }, + { IPv4(65,27,0,0),17 }, + { IPv4(65,27,80,0),20 }, + { IPv4(65,27,120,0),21 }, + { IPv4(65,27,128,0),17 }, + { IPv4(65,28,0,0),17 }, + { IPv4(65,28,0,0),14 }, + { IPv4(65,28,128,0),20 }, + { IPv4(65,28,144,0),20 }, + { IPv4(65,28,160,0),19 }, + { IPv4(65,28,192,0),19 }, + { IPv4(65,28,224,0),19 }, + { IPv4(65,29,0,0),18 }, + { IPv4(65,29,64,0),19 }, + { IPv4(65,29,96,0),19 }, + { IPv4(65,29,128,0),18 }, + { IPv4(65,29,192,0),19 }, + { IPv4(65,29,224,0),19 }, + { IPv4(65,30,0,0),18 }, + { IPv4(65,30,128,0),18 }, + { IPv4(65,30,192,0),19 }, + { IPv4(65,30,224,0),19 }, + { IPv4(65,31,0,0),19 }, + { IPv4(65,31,32,0),19 }, + { IPv4(65,31,64,0),20 }, + { IPv4(65,31,80,0),20 }, + { IPv4(65,31,96,0),19 }, + { IPv4(65,31,128,0),18 }, + { IPv4(65,31,192,0),20 }, + { IPv4(65,31,224,0),20 }, + { IPv4(65,31,240,0),20 }, + { IPv4(65,32,0,0),17 }, + { IPv4(65,32,128,0),18 }, + { IPv4(65,32,192,0),18 }, + { IPv4(65,33,0,0),17 }, + { IPv4(65,33,128,0),18 }, + { IPv4(65,33,192,0),18 }, + { IPv4(65,34,0,0),20 }, + { IPv4(65,34,16,0),20 }, + { IPv4(65,34,32,0),20 }, + { IPv4(65,34,48,0),20 }, + { IPv4(65,34,64,0),18 }, + { IPv4(65,34,128,0),18 }, + { IPv4(65,34,192,0),18 }, + { IPv4(65,35,0,0),18 }, + { IPv4(65,35,64,0),19 }, + { IPv4(65,35,96,0),19 }, + { IPv4(65,35,128,0),17 }, + { IPv4(65,42,208,0),22 }, + { IPv4(65,45,0,0),17 }, + { IPv4(65,45,0,0),16 }, + { IPv4(65,45,128,0),21 }, + { IPv4(65,45,128,0),18 }, + { IPv4(65,54,128,0),19 }, + { IPv4(65,54,160,0),19 }, + { IPv4(65,54,192,0),19 }, + { IPv4(65,54,224,0),19 }, + { IPv4(65,64,176,0),20 }, + { IPv4(65,65,32,0),20 }, + { IPv4(65,65,224,0),20 }, + { IPv4(65,67,64,0),20 }, + { IPv4(65,68,96,0),20 }, + { IPv4(65,68,160,0),20 }, + { IPv4(65,68,160,0),22 }, + { IPv4(65,68,252,0),24 }, + { IPv4(65,68,253,0),24 }, + { IPv4(65,69,224,0),20 }, + { IPv4(65,70,224,0),20 }, + { IPv4(65,71,160,0),20 }, + { IPv4(65,76,0,0),16 }, + { IPv4(65,88,0,0),24 }, + { IPv4(65,88,0,0),14 }, + { IPv4(65,88,9,0),24 }, + { IPv4(65,88,10,0),23 }, + { IPv4(65,88,22,0),24 }, + { IPv4(65,88,62,0),24 }, + { IPv4(65,88,80,0),22 }, + { IPv4(65,88,84,0),22 }, + { IPv4(65,88,88,0),22 }, + { IPv4(65,88,96,0),20 }, + { IPv4(65,88,112,0),22 }, + { IPv4(65,88,116,0),22 }, + { IPv4(65,88,125,0),24 }, + { IPv4(65,88,168,0),21 }, + { IPv4(65,88,176,0),20 }, + { IPv4(65,88,192,0),21 }, + { IPv4(65,88,200,0),22 }, + { IPv4(65,88,204,0),23 }, + { IPv4(65,88,206,0),24 }, + { IPv4(65,88,207,0),25 }, + { IPv4(65,88,214,0),24 }, + { IPv4(65,88,216,0),22 }, + { IPv4(65,88,240,0),20 }, + { IPv4(65,89,5,0),24 }, + { IPv4(65,89,14,0),23 }, + { IPv4(65,89,32,0),21 }, + { IPv4(65,89,40,0),22 }, + { IPv4(65,89,44,0),22 }, + { IPv4(65,89,48,0),21 }, + { IPv4(65,89,56,0),22 }, + { IPv4(65,89,60,0),22 }, + { IPv4(65,89,80,0),20 }, + { IPv4(65,89,96,0),20 }, + { IPv4(65,89,128,0),22 }, + { IPv4(65,89,150,0),24 }, + { IPv4(65,89,156,0),23 }, + { IPv4(65,89,166,192),26 }, + { IPv4(65,89,204,0),24 }, + { IPv4(65,89,220,0),22 }, + { IPv4(65,89,240,0),21 }, + { IPv4(65,89,250,0),23 }, + { 
IPv4(65,90,56,0),21 }, + { IPv4(65,90,64,0),21 }, + { IPv4(65,90,72,0),21 }, + { IPv4(65,90,80,0),21 }, + { IPv4(65,90,88,0),21 }, + { IPv4(65,90,144,0),20 }, + { IPv4(65,90,177,0),24 }, + { IPv4(65,90,208,0),20 }, + { IPv4(65,96,0,0),19 }, + { IPv4(65,96,32,0),19 }, + { IPv4(65,96,64,0),18 }, + { IPv4(65,96,128,0),17 }, + { IPv4(65,97,0,0),19 }, + { IPv4(65,100,53,0),24 }, + { IPv4(65,100,54,0),24 }, + { IPv4(65,104,0,0),14 }, + { IPv4(65,105,159,0),24 }, + { IPv4(65,105,191,0),24 }, + { IPv4(65,105,218,0),24 }, + { IPv4(65,105,227,0),24 }, + { IPv4(65,105,236,0),24 }, + { IPv4(65,106,136,0),24 }, + { IPv4(65,106,164,0),24 }, + { IPv4(65,106,171,0),24 }, + { IPv4(65,112,27,0),24 }, + { IPv4(65,112,28,0),23 }, + { IPv4(65,112,31,0),24 }, + { IPv4(65,112,122,0),23 }, + { IPv4(65,112,125,0),24 }, + { IPv4(65,112,126,0),24 }, + { IPv4(65,112,127,0),24 }, + { IPv4(65,112,196,0),24 }, + { IPv4(65,112,198,0),24 }, + { IPv4(65,112,199,0),24 }, + { IPv4(65,112,206,0),24 }, + { IPv4(65,112,216,0),24 }, + { IPv4(65,112,241,0),24 }, + { IPv4(65,112,246,0),23 }, + { IPv4(65,112,255,0),24 }, + { IPv4(65,113,8,0),24 }, + { IPv4(65,113,10,0),24 }, + { IPv4(65,113,11,0),24 }, + { IPv4(65,113,12,0),23 }, + { IPv4(65,113,14,0),23 }, + { IPv4(65,113,45,0),24 }, + { IPv4(65,113,124,0),22 }, + { IPv4(65,113,220,0),22 }, + { IPv4(65,113,224,0),24 }, + { IPv4(65,113,227,0),24 }, + { IPv4(65,113,229,0),24 }, + { IPv4(65,113,230,0),24 }, + { IPv4(65,113,236,0),22 }, + { IPv4(65,113,240,0),24 }, + { IPv4(65,113,242,0),24 }, + { IPv4(65,113,244,0),23 }, + { IPv4(65,114,9,0),24 }, + { IPv4(65,114,27,0),24 }, + { IPv4(65,114,50,0),24 }, + { IPv4(65,114,51,0),24 }, + { IPv4(65,114,194,0),23 }, + { IPv4(65,114,200,0),24 }, + { IPv4(65,114,201,0),24 }, + { IPv4(65,114,202,0),24 }, + { IPv4(65,114,203,0),24 }, + { IPv4(65,114,204,0),24 }, + { IPv4(65,114,205,0),24 }, + { IPv4(65,114,210,0),24 }, + { IPv4(65,114,211,0),24 }, + { IPv4(65,114,213,0),24 }, + { IPv4(65,114,214,0),24 }, + { IPv4(65,114,215,0),24 }, + { IPv4(65,114,216,0),23 }, + { IPv4(65,114,220,0),24 }, + { IPv4(65,114,221,0),24 }, + { IPv4(65,114,222,0),24 }, + { IPv4(65,114,223,0),24 }, + { IPv4(65,114,227,0),24 }, + { IPv4(65,114,228,0),24 }, + { IPv4(65,114,229,0),24 }, + { IPv4(65,114,230,0),24 }, + { IPv4(65,114,232,0),24 }, + { IPv4(65,114,234,0),24 }, + { IPv4(65,114,239,0),24 }, + { IPv4(65,114,240,0),24 }, + { IPv4(65,114,241,0),24 }, + { IPv4(65,114,244,0),23 }, + { IPv4(65,114,247,0),24 }, + { IPv4(65,114,248,0),23 }, + { IPv4(65,114,250,0),24 }, + { IPv4(65,115,54,0),24 }, + { IPv4(65,115,174,0),24 }, + { IPv4(65,115,238,0),24 }, + { IPv4(65,116,66,0),24 }, + { IPv4(65,116,67,0),24 }, + { IPv4(65,116,68,0),24 }, + { IPv4(65,116,76,0),24 }, + { IPv4(65,116,77,0),24 }, + { IPv4(65,116,78,0),24 }, + { IPv4(65,116,183,0),24 }, + { IPv4(65,116,186,0),24 }, + { IPv4(65,116,228,0),23 }, + { IPv4(65,116,240,0),24 }, + { IPv4(65,116,242,0),24 }, + { IPv4(65,116,243,0),24 }, + { IPv4(65,117,80,0),24 }, + { IPv4(65,117,81,0),24 }, + { IPv4(65,117,86,0),24 }, + { IPv4(65,117,87,0),24 }, + { IPv4(65,117,88,0),21 }, + { IPv4(65,117,102,0),24 }, + { IPv4(65,117,103,0),24 }, + { IPv4(65,117,104,0),21 }, + { IPv4(65,117,150,0),24 }, + { IPv4(65,117,242,0),23 }, + { IPv4(65,117,244,0),24 }, + { IPv4(65,117,245,0),24 }, + { IPv4(65,117,247,0),24 }, + { IPv4(65,117,249,0),24 }, + { IPv4(65,117,252,0),24 }, + { IPv4(65,117,253,0),24 }, + { IPv4(65,120,230,0),24 }, + { IPv4(65,120,231,0),24 }, + { IPv4(65,120,240,0),23 }, + { IPv4(65,120,242,0),23 }, + { 
IPv4(65,121,0,0),23 }, + { IPv4(65,121,4,0),24 }, + { IPv4(65,121,8,0),23 }, + { IPv4(65,121,18,0),24 }, + { IPv4(65,121,28,0),24 }, + { IPv4(65,121,33,0),24 }, + { IPv4(65,121,34,0),24 }, + { IPv4(65,121,86,0),24 }, + { IPv4(65,160,176,0),20 }, + { IPv4(65,160,224,0),20 }, + { IPv4(65,161,12,0),23 }, + { IPv4(65,161,42,0),24 }, + { IPv4(65,161,43,0),24 }, + { IPv4(65,161,122,0),23 }, + { IPv4(65,161,192,0),24 }, + { IPv4(65,163,56,0),24 }, + { IPv4(65,163,129,0),24 }, + { IPv4(65,163,182,0),24 }, + { IPv4(65,163,226,0),24 }, + { IPv4(65,163,227,0),24 }, + { IPv4(65,164,116,0),22 }, + { IPv4(65,164,118,0),24 }, + { IPv4(65,164,145,0),24 }, + { IPv4(65,164,236,0),22 }, + { IPv4(65,165,67,0),24 }, + { IPv4(65,165,127,0),24 }, + { IPv4(65,165,134,0),23 }, + { IPv4(65,166,123,0),24 }, + { IPv4(65,166,147,0),24 }, + { IPv4(65,166,233,0),24 }, + { IPv4(65,167,179,0),24 }, + { IPv4(65,168,39,0),24 }, + { IPv4(65,168,204,0),22 }, + { IPv4(65,170,140,0),22 }, + { IPv4(65,170,225,0),24 }, + { IPv4(65,171,16,0),21 }, + { IPv4(65,174,28,0),22 }, + { IPv4(65,174,51,0),24 }, + { IPv4(65,174,154,0),23 }, + { IPv4(65,192,32,0),24 }, + { IPv4(65,192,36,0),23 }, + { IPv4(65,193,3,0),24 }, + { IPv4(65,193,19,0),24 }, + { IPv4(65,193,164,0),22 }, + { IPv4(65,193,252,0),22 }, + { IPv4(65,194,128,0),21 }, + { IPv4(65,194,184,0),22 }, + { IPv4(65,195,9,0),24 }, + { IPv4(65,195,12,0),24 }, + { IPv4(65,195,32,0),21 }, + { IPv4(65,195,209,0),24 }, + { IPv4(65,195,211,0),24 }, + { IPv4(65,196,66,0),23 }, + { IPv4(65,197,21,0),24 }, + { IPv4(65,197,91,0),24 }, + { IPv4(65,197,177,0),24 }, + { IPv4(65,197,236,0),22 }, + { IPv4(65,198,132,0),23 }, + { IPv4(65,198,187,0),24 }, + { IPv4(65,198,197,0),24 }, + { IPv4(65,198,198,0),24 }, + { IPv4(65,198,219,0),24 }, + { IPv4(65,198,220,0),23 }, + { IPv4(65,199,0,0),21 }, + { IPv4(65,199,16,0),24 }, + { IPv4(65,199,17,0),24 }, + { IPv4(65,199,18,0),24 }, + { IPv4(65,199,19,0),24 }, + { IPv4(65,199,28,0),24 }, + { IPv4(65,199,44,0),24 }, + { IPv4(65,199,145,0),24 }, + { IPv4(65,199,148,0),24 }, + { IPv4(65,199,149,0),24 }, + { IPv4(65,199,213,0),24 }, + { IPv4(65,200,30,0),24 }, + { IPv4(65,200,122,0),24 }, + { IPv4(65,201,12,0),22 }, + { IPv4(65,201,209,0),24 }, + { IPv4(65,202,11,0),24 }, + { IPv4(65,202,30,0),24 }, + { IPv4(65,202,64,0),22 }, + { IPv4(65,202,115,0),24 }, + { IPv4(65,202,192,0),23 }, + { IPv4(65,203,43,0),24 }, + { IPv4(65,204,41,0),24 }, + { IPv4(65,204,80,0),24 }, + { IPv4(65,204,150,0),24 }, + { IPv4(65,204,186,0),24 }, + { IPv4(65,205,141,0),24 }, + { IPv4(65,205,160,0),22 }, + { IPv4(65,205,191,0),24 }, + { IPv4(65,205,248,0),22 }, + { IPv4(65,206,228,0),22 }, + { IPv4(65,207,56,0),21 }, + { IPv4(65,208,24,0),22 }, + { IPv4(65,208,97,0),24 }, + { IPv4(65,210,129,0),24 }, + { IPv4(65,210,176,0),20 }, + { IPv4(65,211,151,0),24 }, + { IPv4(66,1,224,0),20 }, + { IPv4(66,2,0,0),17 }, + { IPv4(66,2,128,0),18 }, + { IPv4(66,3,0,0),17 }, + { IPv4(66,3,32,0),20 }, + { IPv4(66,3,128,0),18 }, + { IPv4(66,3,192,0),18 }, + { IPv4(66,4,0,0),17 }, + { IPv4(66,4,128,0),17 }, + { IPv4(66,5,0,0),17 }, + { IPv4(66,5,128,0),17 }, + { IPv4(66,6,0,0),20 }, + { IPv4(66,6,96,0),19 }, + { IPv4(66,6,160,0),23 }, + { IPv4(66,6,160,0),20 }, + { IPv4(66,6,162,0),24 }, + { IPv4(66,6,163,0),24 }, + { IPv4(66,6,164,0),24 }, + { IPv4(66,6,165,0),24 }, + { IPv4(66,7,128,0),24 }, + { IPv4(66,7,142,0),24 }, + { IPv4(66,7,145,0),24 }, + { IPv4(66,7,160,0),24 }, + { IPv4(66,7,191,0),24 }, + { IPv4(66,7,224,0),20 }, + { IPv4(66,8,128,0),19 }, + { IPv4(66,8,160,0),20 }, + { 
IPv4(66,8,176,0),20 }, + { IPv4(66,8,192,0),20 }, + { IPv4(66,8,208,0),20 }, + { IPv4(66,8,224,0),20 }, + { IPv4(66,8,240,0),20 }, + { IPv4(66,9,0,0),16 }, + { IPv4(66,21,4,0),24 }, + { IPv4(66,24,0,0),18 }, + { IPv4(66,24,64,0),18 }, + { IPv4(66,24,128,0),19 }, + { IPv4(66,24,160,0),20 }, + { IPv4(66,24,176,0),20 }, + { IPv4(66,24,192,0),19 }, + { IPv4(66,24,224,0),19 }, + { IPv4(66,25,0,0),18 }, + { IPv4(66,25,64,0),19 }, + { IPv4(66,25,96,0),19 }, + { IPv4(66,25,128,0),17 }, + { IPv4(66,26,0,0),20 }, + { IPv4(66,26,16,0),20 }, + { IPv4(66,26,32,0),19 }, + { IPv4(66,26,64,0),19 }, + { IPv4(66,26,96,0),20 }, + { IPv4(66,26,112,0),20 }, + { IPv4(66,26,128,0),19 }, + { IPv4(66,26,160,0),20 }, + { IPv4(66,26,176,0),20 }, + { IPv4(66,26,192,0),19 }, + { IPv4(66,26,224,0),19 }, + { IPv4(66,27,0,0),20 }, + { IPv4(66,27,16,0),20 }, + { IPv4(66,27,32,0),20 }, + { IPv4(66,27,48,0),20 }, + { IPv4(66,27,64,0),18 }, + { IPv4(66,27,128,0),19 }, + { IPv4(66,27,160,0),19 }, + { IPv4(66,27,192,0),19 }, + { IPv4(66,27,224,0),20 }, + { IPv4(66,27,240,0),20 }, + { IPv4(66,28,0,0),17 }, + { IPv4(66,28,13,0),24 }, + { IPv4(66,28,15,0),24 }, + { IPv4(66,28,17,0),24 }, + { IPv4(66,30,0,0),19 }, + { IPv4(66,30,32,0),20 }, + { IPv4(66,30,48,0),20 }, + { IPv4(66,30,64,0),18 }, + { IPv4(66,30,128,0),18 }, + { IPv4(66,30,192,0),18 }, + { IPv4(66,31,0,0),16 }, + { IPv4(66,32,4,0),22 }, + { IPv4(66,32,32,0),21 }, + { IPv4(66,32,112,0),20 }, + { IPv4(66,32,136,0),22 }, + { IPv4(66,33,0,0),17 }, + { IPv4(66,33,128,0),19 }, + { IPv4(66,35,64,0),19 }, + { IPv4(66,35,68,0),22 }, + { IPv4(66,35,72,0),23 }, + { IPv4(66,35,78,0),24 }, + { IPv4(66,36,0,0),20 }, + { IPv4(66,37,128,0),20 }, + { IPv4(66,37,160,0),20 }, + { IPv4(66,37,172,0),24 }, + { IPv4(66,37,173,0),24 }, + { IPv4(66,37,224,0),20 }, + { IPv4(66,38,0,0),24 }, + { IPv4(66,38,0,0),20 }, + { IPv4(66,38,1,0),24 }, + { IPv4(66,38,2,0),24 }, + { IPv4(66,38,3,0),24 }, + { IPv4(66,38,4,0),22 }, + { IPv4(66,38,8,0),22 }, + { IPv4(66,38,12,0),22 }, + { IPv4(66,38,16,0),20 }, + { IPv4(66,38,16,0),22 }, + { IPv4(66,38,20,0),22 }, + { IPv4(66,38,24,0),22 }, + { IPv4(66,38,28,0),24 }, + { IPv4(66,38,28,0),22 }, + { IPv4(66,38,29,0),24 }, + { IPv4(66,38,30,0),24 }, + { IPv4(66,38,31,0),24 }, + { IPv4(66,38,32,0),20 }, + { IPv4(66,38,32,0),21 }, + { IPv4(66,38,32,0),22 }, + { IPv4(66,38,36,0),22 }, + { IPv4(66,38,40,0),22 }, + { IPv4(66,38,44,0),23 }, + { IPv4(66,38,44,0),22 }, + { IPv4(66,38,46,0),23 }, + { IPv4(66,38,48,0),24 }, + { IPv4(66,38,48,0),22 }, + { IPv4(66,38,48,0),20 }, + { IPv4(66,38,52,0),24 }, + { IPv4(66,38,54,0),23 }, + { IPv4(66,38,63,0),24 }, + { IPv4(66,38,181,0),24 }, + { IPv4(66,38,182,0),24 }, + { IPv4(66,39,0,0),17 }, + { IPv4(66,40,0,0),18 }, + { IPv4(66,40,64,0),19 }, + { IPv4(66,40,80,0),20 }, + { IPv4(66,40,96,0),21 }, + { IPv4(66,40,96,0),20 }, + { IPv4(66,40,104,0),21 }, + { IPv4(66,40,112,0),20 }, + { IPv4(66,40,128,0),17 }, + { IPv4(66,40,248,0),21 }, + { IPv4(66,41,0,0),19 }, + { IPv4(66,41,32,0),19 }, + { IPv4(66,41,80,0),20 }, + { IPv4(66,41,96,0),19 }, + { IPv4(66,41,128,0),20 }, + { IPv4(66,41,144,0),20 }, + { IPv4(66,41,160,0),19 }, + { IPv4(66,41,192,0),18 }, + { IPv4(66,42,32,0),20 }, + { IPv4(66,43,192,0),18 }, + { IPv4(66,44,0,0),17 }, + { IPv4(66,45,0,0),17 }, + { IPv4(66,45,0,0),20 }, + { IPv4(66,45,0,0),23 }, + { IPv4(66,45,0,0),18 }, + { IPv4(66,45,2,0),23 }, + { IPv4(66,45,8,0),24 }, + { IPv4(66,45,9,0),24 }, + { IPv4(66,45,10,0),23 }, + { IPv4(66,45,12,0),24 }, + { IPv4(66,45,13,0),24 }, + { IPv4(66,45,14,0),24 }, + { 
IPv4(66,45,15,0),24 }, + { IPv4(66,45,16,0),22 }, + { IPv4(66,45,16,0),20 }, + { IPv4(66,45,20,0),22 }, + { IPv4(66,45,24,0),24 }, + { IPv4(66,45,25,0),24 }, + { IPv4(66,45,26,0),23 }, + { IPv4(66,45,28,0),22 }, + { IPv4(66,45,32,0),22 }, + { IPv4(66,45,32,0),20 }, + { IPv4(66,45,36,0),23 }, + { IPv4(66,45,38,0),23 }, + { IPv4(66,45,41,0),24 }, + { IPv4(66,45,42,0),24 }, + { IPv4(66,45,43,0),24 }, + { IPv4(66,45,44,0),22 }, + { IPv4(66,45,48,0),24 }, + { IPv4(66,45,49,0),24 }, + { IPv4(66,45,50,0),23 }, + { IPv4(66,45,52,0),22 }, + { IPv4(66,45,56,0),23 }, + { IPv4(66,45,59,0),24 }, + { IPv4(66,45,60,0),22 }, + { IPv4(66,45,64,0),21 }, + { IPv4(66,45,64,0),19 }, + { IPv4(66,45,72,0),24 }, + { IPv4(66,45,73,0),24 }, + { IPv4(66,45,74,0),24 }, + { IPv4(66,45,75,0),24 }, + { IPv4(66,45,76,0),23 }, + { IPv4(66,45,78,0),24 }, + { IPv4(66,45,80,0),24 }, + { IPv4(66,45,81,0),24 }, + { IPv4(66,45,82,0),23 }, + { IPv4(66,45,84,0),24 }, + { IPv4(66,45,86,0),23 }, + { IPv4(66,45,88,0),21 }, + { IPv4(66,45,96,0),20 }, + { IPv4(66,45,102,0),23 }, + { IPv4(66,45,104,0),21 }, + { IPv4(66,45,112,0),22 }, + { IPv4(66,45,116,0),23 }, + { IPv4(66,45,120,0),21 }, + { IPv4(66,46,0,0),16 }, + { IPv4(66,46,144,0),21 }, + { IPv4(66,47,4,0),22 }, + { IPv4(66,47,40,0),22 }, + { IPv4(66,47,144,0),22 }, + { IPv4(66,47,148,0),22 }, + { IPv4(66,47,156,0),22 }, + { IPv4(66,47,160,0),20 }, + { IPv4(66,47,188,0),22 }, + { IPv4(66,47,224,0),20 }, + { IPv4(66,47,240,0),22 }, + { IPv4(66,51,7,0),24 }, + { IPv4(66,51,8,0),24 }, + { IPv4(66,51,9,0),24 }, + { IPv4(66,51,10,0),24 }, + { IPv4(66,51,11,0),24 }, + { IPv4(66,51,24,0),23 }, + { IPv4(66,51,26,0),23 }, + { IPv4(66,51,28,0),24 }, + { IPv4(66,51,30,0),24 }, + { IPv4(66,51,32,0),20 }, + { IPv4(66,51,64,0),20 }, + { IPv4(66,51,80,0),22 }, + { IPv4(66,52,192,0),18 }, + { IPv4(66,53,32,0),19 }, + { IPv4(66,53,64,0),19 }, + { IPv4(66,54,154,0),24 }, + { IPv4(66,54,155,0),24 }, + { IPv4(66,54,186,0),24 }, + { IPv4(66,54,193,0),24 }, + { IPv4(66,54,200,0),21 }, + { IPv4(66,54,209,0),24 }, + { IPv4(66,55,0,0),18 }, + { IPv4(66,56,0,0),18 }, + { IPv4(66,56,64,0),19 }, + { IPv4(66,56,96,0),20 }, + { IPv4(66,56,96,0),19 }, + { IPv4(66,56,112,0),20 }, + { IPv4(66,56,128,0),19 }, + { IPv4(66,56,160,0),19 }, + { IPv4(66,56,192,0),19 }, + { IPv4(66,56,224,0),19 }, + { IPv4(66,57,0,0),19 }, + { IPv4(66,57,32,0),19 }, + { IPv4(66,57,32,0),20 }, + { IPv4(66,57,48,0),20 }, + { IPv4(66,57,64,0),19 }, + { IPv4(66,57,96,0),19 }, + { IPv4(66,57,128,0),19 }, + { IPv4(66,57,160,0),19 }, + { IPv4(66,57,192,0),20 }, + { IPv4(66,61,0,0),20 }, + { IPv4(66,61,16,0),20 }, + { IPv4(66,61,32,0),19 }, + { IPv4(66,61,64,0),18 }, + { IPv4(66,61,128,0),19 }, + { IPv4(66,61,144,0),20 }, + { IPv4(66,61,152,0),21 }, + { IPv4(66,61,160,0),19 }, + { IPv4(66,62,0,0),16 }, + { IPv4(66,62,55,0),24 }, + { IPv4(66,62,61,0),24 }, + { IPv4(66,62,224,0),19 }, + { IPv4(66,62,245,0),24 }, + { IPv4(66,65,0,0),18 }, + { IPv4(66,65,64,0),19 }, + { IPv4(66,65,96,0),20 }, + { IPv4(66,65,112,0),20 }, + { IPv4(66,66,0,0),18 }, + { IPv4(66,66,64,0),18 }, + { IPv4(66,66,128,0),18 }, + { IPv4(66,66,192,0),18 }, + { IPv4(66,67,0,0),19 }, + { IPv4(66,67,32,0),20 }, + { IPv4(66,67,48,0),20 }, + { IPv4(66,67,64,0),18 }, + { IPv4(66,68,0,0),17 }, + { IPv4(66,68,128,0),18 }, + { IPv4(66,69,0,0),17 }, + { IPv4(66,69,128,0),18 }, + { IPv4(66,69,192,0),18 }, + { IPv4(66,70,0,0),18 }, + { IPv4(66,70,0,0),17 }, + { IPv4(66,70,58,0),23 }, + { IPv4(66,70,64,0),18 }, + { IPv4(66,70,120,0),22 }, + { IPv4(66,70,152,0),21 }, + { 
IPv4(66,70,176,0),22 }, + { IPv4(66,70,185,0),24 }, + { IPv4(66,70,188,0),22 }, + { IPv4(66,70,216,0),21 }, + { IPv4(66,71,0,0),17 }, + { IPv4(66,71,128,0),18 }, + { IPv4(66,74,0,0),18 }, + { IPv4(66,74,64,0),19 }, + { IPv4(66,74,96,0),20 }, + { IPv4(66,74,112,0),20 }, + { IPv4(66,74,128,0),18 }, + { IPv4(66,74,192,0),19 }, + { IPv4(66,74,224,0),20 }, + { IPv4(66,74,240,0),20 }, + { IPv4(66,75,0,0),19 }, + { IPv4(66,75,32,0),19 }, + { IPv4(66,75,64,0),19 }, + { IPv4(66,75,96,0),19 }, + { IPv4(66,75,128,0),19 }, + { IPv4(66,75,160,0),20 }, + { IPv4(66,75,176,0),20 }, + { IPv4(66,75,192,0),19 }, + { IPv4(66,76,0,0),18 }, + { IPv4(66,76,64,0),20 }, + { IPv4(66,76,80,0),20 }, + { IPv4(66,76,96,0),20 }, + { IPv4(66,76,112,0),20 }, + { IPv4(66,76,128,0),20 }, + { IPv4(66,76,160,0),20 }, + { IPv4(66,76,176,0),20 }, + { IPv4(66,77,32,0),21 }, + { IPv4(66,77,34,0),24 }, + { IPv4(66,77,36,0),23 }, + { IPv4(66,77,38,0),24 }, + { IPv4(66,79,128,0),19 }, + { IPv4(66,79,129,0),24 }, + { IPv4(66,79,132,0),24 }, + { IPv4(66,79,133,0),24 }, + { IPv4(66,79,135,0),24 }, + { IPv4(66,79,136,0),24 }, + { IPv4(66,81,0,0),17 }, + { IPv4(66,82,0,0),19 }, + { IPv4(66,87,32,0),20 }, + { IPv4(66,87,48,0),20 }, + { IPv4(66,87,128,0),20 }, + { IPv4(66,87,208,0),20 }, + { IPv4(66,88,0,0),15 }, + { IPv4(66,90,0,0),21 }, + { IPv4(66,91,0,0),18 }, + { IPv4(66,91,64,0),19 }, + { IPv4(66,91,96,0),20 }, + { IPv4(66,92,0,0),19 }, + { IPv4(66,92,20,0),22 }, + { IPv4(66,92,32,0),19 }, + { IPv4(66,92,64,0),19 }, + { IPv4(66,92,96,0),19 }, + { IPv4(66,92,128,0),20 }, + { IPv4(66,92,144,0),20 }, + { IPv4(66,92,160,0),20 }, + { IPv4(66,92,176,0),20 }, + { IPv4(66,92,192,0),22 }, + { IPv4(66,92,196,0),22 }, + { IPv4(66,92,200,0),22 }, + { IPv4(66,92,204,0),22 }, + { IPv4(66,92,208,0),22 }, + { IPv4(66,92,216,0),21 }, + { IPv4(66,92,240,0),21 }, + { IPv4(66,92,248,0),22 }, + { IPv4(66,92,252,0),22 }, + { IPv4(66,95,0,0),17 }, + { IPv4(66,95,128,0),19 }, + { IPv4(66,96,0,0),20 }, + { IPv4(66,96,192,0),18 }, + { IPv4(66,99,0,0),16 }, + { IPv4(66,100,104,0),22 }, + { IPv4(66,100,108,0),23 }, + { IPv4(66,101,32,0),20 }, + { IPv4(66,105,0,0),16 }, + { IPv4(66,106,0,0),15 }, + { IPv4(66,108,0,0),17 }, + { IPv4(66,108,128,0),17 }, + { IPv4(66,109,160,0),20 }, + { IPv4(66,109,192,0),20 }, + { IPv4(66,110,28,0),23 }, + { IPv4(66,111,192,0),19 }, + { IPv4(66,111,224,0),19 }, + { IPv4(66,113,0,0),19 }, + { IPv4(66,114,64,0),20 }, + { IPv4(66,114,96,0),20 }, + { IPv4(66,114,128,0),19 }, + { IPv4(66,115,128,0),18 }, + { IPv4(66,118,64,0),19 }, + { IPv4(66,118,80,0),20 }, + { IPv4(66,118,192,0),19 }, + { IPv4(66,119,192,0),19 }, + { IPv4(66,119,196,0),22 }, + { IPv4(66,119,200,0),22 }, + { IPv4(66,119,208,0),22 }, + { IPv4(66,121,192,0),20 }, + { IPv4(66,122,64,0),20 }, + { IPv4(66,122,164,0),24 }, + { IPv4(66,128,2,0),24 }, + { IPv4(66,128,96,0),20 }, + { IPv4(66,128,160,0),20 }, + { IPv4(66,129,64,0),20 }, + { IPv4(66,129,80,0),20 }, + { IPv4(66,129,192,0),19 }, + { IPv4(66,130,0,0),17 }, + { IPv4(66,133,0,0),18 }, + { IPv4(66,133,4,0),24 }, + { IPv4(66,133,21,0),24 }, + { IPv4(66,135,128,0),20 }, + { IPv4(66,135,224,0),20 }, + { IPv4(66,137,176,0),20 }, + { IPv4(66,144,0,0),15 }, + { IPv4(66,149,0,0),17 }, + { IPv4(66,149,64,0),20 }, + { IPv4(66,149,112,0),22 }, + { IPv4(66,149,120,0),22 }, + { IPv4(66,150,0,0),20 }, + { IPv4(66,150,5,0),24 }, + { IPv4(66,150,14,0),24 }, + { IPv4(66,150,16,0),20 }, + { IPv4(66,150,48,0),20 }, + { IPv4(66,150,64,0),21 }, + { IPv4(66,150,64,0),20 }, + { IPv4(66,150,96,0),20 }, + { IPv4(66,150,112,0),20 }, + { 
IPv4(66,150,128,0),20 }, + { IPv4(66,150,144,0),20 }, + { IPv4(66,152,128,0),19 }, + { IPv4(66,153,128,0),18 }, + { IPv4(66,153,192,0),20 }, + { IPv4(66,154,128,0),17 }, + { IPv4(66,155,0,0),17 }, + { IPv4(66,158,0,0),17 }, + { IPv4(66,161,128,0),18 }, + { IPv4(66,161,138,0),23 }, + { IPv4(66,162,33,0),24 }, + { IPv4(66,163,224,0),20 }, + { IPv4(66,164,0,0),24 }, + { IPv4(66,164,1,0),24 }, + { IPv4(66,164,2,0),24 }, + { IPv4(66,164,4,0),24 }, + { IPv4(66,164,5,0),24 }, + { IPv4(66,164,7,0),24 }, + { IPv4(66,164,200,0),21 }, + { IPv4(66,164,208,0),21 }, + { IPv4(66,164,240,0),20 }, + { IPv4(66,168,32,0),24 }, + { IPv4(66,168,38,0),23 }, + { IPv4(66,168,80,0),20 }, + { IPv4(66,170,96,0),20 }, + { IPv4(66,175,0,0),18 }, + { IPv4(66,177,0,0),17 }, + { IPv4(66,177,128,0),18 }, + { IPv4(66,177,192,0),19 }, + { IPv4(66,179,0,0),18 }, + { IPv4(66,179,0,0),23 }, + { IPv4(66,179,4,0),22 }, + { IPv4(66,179,64,0),19 }, + { IPv4(66,179,96,0),20 }, + { IPv4(66,180,32,0),20 }, + { IPv4(66,180,192,0),20 }, + { IPv4(67,0,0,0),16 }, + { IPv4(67,8,0,0),19 }, + { IPv4(67,8,32,0),20 }, + { IPv4(67,89,0,0),17 }, + { IPv4(67,89,128,0),18 }, + { IPv4(67,96,0,0),19 }, + { IPv4(67,96,0,0),14 }, + { IPv4(67,96,86,0),24 }, + { IPv4(67,96,87,0),24 }, + { IPv4(67,96,88,0),23 }, + { IPv4(67,96,96,0),21 }, + { IPv4(67,96,224,0),21 }, + { IPv4(67,97,64,0),20 }, + { IPv4(67,97,144,0),21 }, + { IPv4(67,97,152,0),21 }, + { IPv4(67,97,160,0),20 }, + { IPv4(67,97,176,0),21 }, + { IPv4(67,104,0,0),15 }, + { IPv4(67,105,4,0),23 }, + { IPv4(67,160,0,0),16 }, + { IPv4(67,160,0,0),13 }, + { IPv4(67,161,0,0),17 }, + { IPv4(67,161,128,0),17 }, + { IPv4(67,162,0,0),16 }, + { IPv4(67,163,0,0),17 }, + { IPv4(67,163,128,0),17 }, + { IPv4(67,164,0,0),15 }, + { IPv4(67,166,0,0),17 }, + { IPv4(67,166,192,0),18 }, + { IPv4(67,167,0,0),17 }, + { IPv4(67,167,128,0),17 }, + { IPv4(80,0,0,0),13 }, + { IPv4(80,60,0,0),15 }, + { IPv4(80,64,32,0),20 }, + { IPv4(80,65,96,0),20 }, + { IPv4(80,66,224,0),20 }, + { IPv4(80,67,168,0),21 }, + { IPv4(80,68,128,0),20 }, + { IPv4(80,69,64,0),20 }, + { IPv4(80,71,64,0),20 }, + { IPv4(80,72,96,0),20 }, + { IPv4(80,72,160,0),24 }, + { IPv4(80,74,128,0),20 }, + { IPv4(80,75,64,0),20 }, + { IPv4(80,76,160,0),20 }, + { IPv4(80,78,32,0),20 }, + { IPv4(80,78,160,0),20 }, + { IPv4(80,78,224,0),20 }, + { IPv4(80,79,160,0),20 }, + { IPv4(80,79,224,0),20 }, + { IPv4(80,81,96,0),20 }, + { IPv4(80,84,160,0),22 }, + { IPv4(80,86,32,0),20 }, + { IPv4(80,88,192,0),20 }, + { IPv4(80,90,128,0),20 }, + { IPv4(80,91,128,0),20 }, + { IPv4(80,94,192,0),24 }, + { IPv4(80,96,3,0),24 }, + { IPv4(80,96,8,0),24 }, + { IPv4(80,96,128,0),24 }, + { IPv4(80,96,148,0),24 }, + { IPv4(80,96,184,0),24 }, + { IPv4(80,192,0,0),14 }, + { IPv4(127,0,0,0),8 }, + { IPv4(128,2,0,0),16 }, + { IPv4(128,3,0,0),16 }, + { IPv4(128,6,0,0),16 }, + { IPv4(128,15,0,0),16 }, + { IPv4(128,19,0,0),16 }, + { IPv4(128,23,0,0),16 }, + { IPv4(128,32,0,0),16 }, + { IPv4(128,37,0,0),16 }, + { IPv4(128,38,0,0),16 }, + { IPv4(128,47,0,0),16 }, + { IPv4(128,48,0,0),16 }, + { IPv4(128,49,0,0),16 }, + { IPv4(128,54,0,0),16 }, + { IPv4(128,55,0,0),16 }, + { IPv4(128,56,0,0),16 }, + { IPv4(128,59,0,0),16 }, + { IPv4(128,60,0,0),16 }, + { IPv4(128,61,0,0),16 }, + { IPv4(128,62,0,0),16 }, + { IPv4(128,63,0,0),16 }, + { IPv4(128,64,32,0),24 }, + { IPv4(128,64,148,0),22 }, + { IPv4(128,64,164,0),23 }, + { IPv4(128,64,192,0),23 }, + { IPv4(128,64,203,0),24 }, + { IPv4(128,64,250,0),24 }, + { IPv4(128,64,251,0),24 }, + { IPv4(128,83,0,0),16 }, + { IPv4(128,84,0,0),16 }, + { 
IPv4(128,88,0,0),16 }, + { IPv4(128,91,0,0),16 }, + { IPv4(128,97,0,0),16 }, + { IPv4(128,101,0,0),16 }, + { IPv4(128,102,0,0),16 }, + { IPv4(128,102,18,0),24 }, + { IPv4(128,104,25,0),24 }, + { IPv4(128,107,0,0),16 }, + { IPv4(128,110,0,0),16 }, + { IPv4(128,111,0,0),16 }, + { IPv4(128,112,0,0),16 }, + { IPv4(128,113,0,0),16 }, + { IPv4(128,114,0,0),16 }, + { IPv4(128,115,0,0),16 }, + { IPv4(128,116,0,0),16 }, + { IPv4(128,117,0,0),16 }, + { IPv4(128,118,0,0),16 }, + { IPv4(128,120,0,0),16 }, + { IPv4(128,121,0,0),16 }, + { IPv4(128,122,0,0),16 }, + { IPv4(128,129,0,0),16 }, + { IPv4(128,132,0,0),16 }, + { IPv4(128,134,0,0),16 }, + { IPv4(128,134,20,0),24 }, + { IPv4(128,134,21,0),24 }, + { IPv4(128,134,37,0),24 }, + { IPv4(128,134,38,0),24 }, + { IPv4(128,134,39,0),24 }, + { IPv4(128,134,75,0),24 }, + { IPv4(128,134,76,0),24 }, + { IPv4(128,134,85,0),24 }, + { IPv4(128,134,86,0),24 }, + { IPv4(128,134,87,0),24 }, + { IPv4(128,134,88,0),24 }, + { IPv4(128,134,89,0),24 }, + { IPv4(128,134,90,0),24 }, + { IPv4(128,134,91,0),24 }, + { IPv4(128,134,92,0),24 }, + { IPv4(128,134,93,0),24 }, + { IPv4(128,134,94,0),24 }, + { IPv4(128,134,126,0),24 }, + { IPv4(128,134,127,0),24 }, + { IPv4(128,134,135,0),24 }, + { IPv4(128,134,148,0),24 }, + { IPv4(128,134,149,0),24 }, + { IPv4(128,134,150,0),24 }, + { IPv4(128,134,154,0),24 }, + { IPv4(128,134,170,0),24 }, + { IPv4(128,134,225,0),24 }, + { IPv4(128,138,0,0),16 }, + { IPv4(128,147,0,0),16 }, + { IPv4(128,149,0,0),16 }, + { IPv4(128,151,0,0),16 }, + { IPv4(128,152,0,0),16 }, + { IPv4(128,153,0,0),16 }, + { IPv4(128,154,0,0),16 }, + { IPv4(128,155,0,0),16 }, + { IPv4(128,156,0,0),16 }, + { IPv4(128,157,0,0),16 }, + { IPv4(128,158,0,0),16 }, + { IPv4(128,159,0,0),16 }, + { IPv4(128,160,0,0),16 }, + { IPv4(128,162,0,0),16 }, + { IPv4(128,163,0,0),16 }, + { IPv4(128,164,0,0),16 }, + { IPv4(128,165,0,0),16 }, + { IPv4(128,170,0,0),16 }, + { IPv4(128,174,0,0),16 }, + { IPv4(128,177,0,0),16 }, + { IPv4(128,177,208,0),20 }, + { IPv4(128,177,246,0),24 }, + { IPv4(128,177,248,0),24 }, + { IPv4(128,180,0,0),16 }, + { IPv4(128,182,0,0),16 }, + { IPv4(128,182,64,0),18 }, + { IPv4(128,183,0,0),16 }, + { IPv4(128,187,0,0),16 }, + { IPv4(128,190,0,0),16 }, + { IPv4(128,190,132,0),24 }, + { IPv4(128,190,161,0),26 }, + { IPv4(128,190,203,0),27 }, + { IPv4(128,190,250,0),24 }, + { IPv4(128,192,0,0),16 }, + { IPv4(128,195,0,0),16 }, + { IPv4(128,196,0,0),16 }, + { IPv4(128,198,0,0),16 }, + { IPv4(128,200,0,0),16 }, + { IPv4(128,202,0,0),16 }, + { IPv4(128,205,0,0),16 }, + { IPv4(128,206,0,0),16 }, + { IPv4(128,209,0,0),16 }, + { IPv4(128,213,0,0),16 }, + { IPv4(128,217,0,0),16 }, + { IPv4(128,218,0,0),16 }, + { IPv4(128,219,0,0),16 }, + { IPv4(128,220,0,0),16 }, + { IPv4(128,226,0,0),16 }, + { IPv4(128,228,0,0),16 }, + { IPv4(128,230,0,0),16 }, + { IPv4(128,236,0,0),16 }, + { IPv4(128,237,0,0),16 }, + { IPv4(128,238,0,0),16 }, + { IPv4(128,241,0,0),16 }, + { IPv4(128,242,0,0),16 }, + { IPv4(128,242,192,0),18 }, + { IPv4(128,246,0,0),16 }, + { IPv4(128,248,0,0),16 }, + { IPv4(128,252,0,0),16 }, + { IPv4(128,253,0,0),16 }, + { IPv4(128,255,0,0),16 }, + { IPv4(129,3,0,0),16 }, + { IPv4(129,8,0,0),16 }, + { IPv4(129,9,0,0),16 }, + { IPv4(129,17,0,0),16 }, + { IPv4(129,19,0,0),16 }, + { IPv4(129,21,0,0),16 }, + { IPv4(129,29,0,0),16 }, + { IPv4(129,30,0,0),16 }, + { IPv4(129,33,0,0),19 }, + { IPv4(129,33,0,0),16 }, + { IPv4(129,33,32,0),19 }, + { IPv4(129,33,64,0),19 }, + { IPv4(129,33,96,0),19 }, + { IPv4(129,33,128,0),19 }, + { IPv4(129,33,160,0),19 }, + { 
IPv4(129,33,224,0),20 }, + { IPv4(129,33,224,0),19 }, + { IPv4(129,35,0,0),16 }, + { IPv4(129,35,40,0),21 }, + { IPv4(129,35,64,0),22 }, + { IPv4(129,35,65,0),24 }, + { IPv4(129,35,68,0),22 }, + { IPv4(129,35,72,0),22 }, + { IPv4(129,35,76,0),22 }, + { IPv4(129,35,96,0),20 }, + { IPv4(129,35,128,0),20 }, + { IPv4(129,35,160,0),22 }, + { IPv4(129,35,160,0),20 }, + { IPv4(129,35,192,0),21 }, + { IPv4(129,35,224,0),21 }, + { IPv4(129,35,232,0),22 }, + { IPv4(129,37,0,0),16 }, + { IPv4(129,37,25,0),24 }, + { IPv4(129,37,37,0),24 }, + { IPv4(129,37,40,0),24 }, + { IPv4(129,37,70,0),24 }, + { IPv4(129,37,78,0),24 }, + { IPv4(129,37,81,0),24 }, + { IPv4(129,37,95,0),24 }, + { IPv4(129,37,97,0),24 }, + { IPv4(129,37,109,0),24 }, + { IPv4(129,37,112,0),24 }, + { IPv4(129,37,136,0),21 }, + { IPv4(129,37,144,0),20 }, + { IPv4(129,37,152,0),24 }, + { IPv4(129,37,160,0),20 }, + { IPv4(129,37,176,0),22 }, + { IPv4(129,37,180,0),23 }, + { IPv4(129,37,184,0),24 }, + { IPv4(129,37,204,0),24 }, + { IPv4(129,37,243,0),24 }, + { IPv4(129,37,254,0),24 }, + { IPv4(129,41,32,0),20 }, + { IPv4(129,41,80,0),20 }, + { IPv4(129,41,192,0),20 }, + { IPv4(129,41,208,0),20 }, + { IPv4(129,42,0,0),16 }, + { IPv4(129,42,1,0),24 }, + { IPv4(129,42,2,0),24 }, + { IPv4(129,42,3,0),24 }, + { IPv4(129,42,4,0),24 }, + { IPv4(129,42,8,0),24 }, + { IPv4(129,42,9,0),24 }, + { IPv4(129,42,10,0),24 }, + { IPv4(129,42,14,0),24 }, + { IPv4(129,42,16,0),24 }, + { IPv4(129,42,17,0),24 }, + { IPv4(129,42,18,0),24 }, + { IPv4(129,42,19,0),24 }, + { IPv4(129,42,20,0),24 }, + { IPv4(129,42,21,0),24 }, + { IPv4(129,42,24,0),24 }, + { IPv4(129,42,26,0),24 }, + { IPv4(129,42,36,0),24 }, + { IPv4(129,42,37,0),24 }, + { IPv4(129,42,38,0),24 }, + { IPv4(129,42,39,0),24 }, + { IPv4(129,42,40,0),24 }, + { IPv4(129,42,41,0),24 }, + { IPv4(129,42,42,0),24 }, + { IPv4(129,42,43,0),24 }, + { IPv4(129,42,44,0),24 }, + { IPv4(129,42,45,0),24 }, + { IPv4(129,42,46,0),24 }, + { IPv4(129,42,47,0),24 }, + { IPv4(129,42,48,0),24 }, + { IPv4(129,42,50,0),24 }, + { IPv4(129,42,52,0),24 }, + { IPv4(129,42,53,0),24 }, + { IPv4(129,42,54,0),24 }, + { IPv4(129,42,56,0),24 }, + { IPv4(129,42,57,0),24 }, + { IPv4(129,42,59,0),24 }, + { IPv4(129,42,208,0),24 }, + { IPv4(129,42,240,0),24 }, + { IPv4(129,42,241,0),24 }, + { IPv4(129,42,242,0),24 }, + { IPv4(129,42,243,0),24 }, + { IPv4(129,42,244,0),24 }, + { IPv4(129,42,246,0),24 }, + { IPv4(129,46,0,0),16 }, + { IPv4(129,48,0,0),16 }, + { IPv4(129,49,0,0),16 }, + { IPv4(129,51,0,0),16 }, + { IPv4(129,52,0,0),16 }, + { IPv4(129,53,0,0),16 }, + { IPv4(129,54,0,0),16 }, + { IPv4(129,57,0,0),16 }, + { IPv4(129,59,0,0),16 }, + { IPv4(129,61,0,0),16 }, + { IPv4(129,62,0,0),16 }, + { IPv4(129,65,0,0),16 }, + { IPv4(129,72,0,0),16 }, + { IPv4(129,73,0,0),16 }, + { IPv4(129,79,0,0),16 }, + { IPv4(129,81,0,0),16 }, + { IPv4(129,82,0,0),16 }, + { IPv4(129,85,0,0),16 }, + { IPv4(129,92,0,0),16 }, + { IPv4(129,98,0,0),16 }, + { IPv4(129,99,0,0),16 }, + { IPv4(129,100,0,0),16 }, + { IPv4(129,105,0,0),16 }, + { IPv4(129,106,0,0),16 }, + { IPv4(129,116,0,0),16 }, + { IPv4(129,123,0,0),16 }, + { IPv4(129,131,0,0),16 }, + { IPv4(129,139,0,0),16 }, + { IPv4(129,141,0,0),16 }, + { IPv4(129,164,0,0),16 }, + { IPv4(129,165,0,0),16 }, + { IPv4(129,172,0,0),16 }, + { IPv4(129,176,0,0),16 }, + { IPv4(129,179,0,0),16 }, + { IPv4(129,186,0,0),16 }, + { IPv4(129,190,0,0),16 }, + { IPv4(129,191,0,0),16 }, + { IPv4(129,196,0,0),16 }, + { IPv4(129,197,0,0),16 }, + { IPv4(129,198,0,0),16 }, + { IPv4(129,200,0,0),16 }, + { IPv4(129,210,0,0),16 }, + { 
IPv4(129,212,0,0),16 }, + { IPv4(129,218,0,0),16 }, + { IPv4(129,219,0,0),16 }, + { IPv4(129,223,96,0),19 }, + { IPv4(129,223,123,0),24 }, + { IPv4(129,223,136,0),21 }, + { IPv4(129,223,148,0),22 }, + { IPv4(129,223,152,0),24 }, + { IPv4(129,223,153,0),24 }, + { IPv4(129,223,155,0),24 }, + { IPv4(129,225,0,0),16 }, + { IPv4(129,227,0,0),16 }, + { IPv4(129,229,0,0),16 }, + { IPv4(129,235,0,0),16 }, + { IPv4(129,236,0,0),16 }, + { IPv4(129,238,0,0),16 }, + { IPv4(129,239,0,0),16 }, + { IPv4(129,246,0,0),16 }, + { IPv4(129,246,6,0),24 }, + { IPv4(129,250,0,0),16 }, + { IPv4(129,252,0,0),16 }, + { IPv4(129,253,0,0),16 }, + { IPv4(129,254,0,0),16 }, + { IPv4(129,255,0,0),16 }, + { IPv4(130,11,0,0),16 }, + { IPv4(130,13,0,0),16 }, + { IPv4(130,17,0,0),16 }, + { IPv4(130,20,0,0),16 }, + { IPv4(130,22,0,0),16 }, + { IPv4(130,27,0,0),16 }, + { IPv4(130,29,0,0),16 }, + { IPv4(130,30,0,0),16 }, + { IPv4(130,36,61,0),24 }, + { IPv4(130,38,0,0),16 }, + { IPv4(130,44,0,0),16 }, + { IPv4(130,46,0,0),16 }, + { IPv4(130,49,0,0),16 }, + { IPv4(130,49,0,0),17 }, + { IPv4(130,49,246,0),23 }, + { IPv4(130,50,0,0),16 }, + { IPv4(130,50,0,0),17 }, + { IPv4(130,53,0,0),16 }, + { IPv4(130,57,0,0),16 }, + { IPv4(130,64,0,0),16 }, + { IPv4(130,64,128,0),19 }, + { IPv4(130,65,0,0),16 }, + { IPv4(130,71,0,0),16 }, + { IPv4(130,86,0,0),16 }, + { IPv4(130,91,0,0),16 }, + { IPv4(130,94,0,0),16 }, + { IPv4(130,99,0,0),16 }, + { IPv4(130,102,28,0),24 }, + { IPv4(130,107,0,0),16 }, + { IPv4(130,109,0,0),16 }, + { IPv4(130,110,0,0),16 }, + { IPv4(130,114,0,0),16 }, + { IPv4(130,118,0,0),16 }, + { IPv4(130,123,0,0),16 }, + { IPv4(130,126,0,0),16 }, + { IPv4(130,127,0,0),16 }, + { IPv4(130,134,0,0),16 }, + { IPv4(130,135,0,0),16 }, + { IPv4(130,150,0,0),16 }, + { IPv4(130,154,0,0),16 }, + { IPv4(130,157,0,0),16 }, + { IPv4(130,162,0,0),16 }, + { IPv4(130,163,0,0),16 }, + { IPv4(130,164,0,0),16 }, + { IPv4(130,164,143,0),24 }, + { IPv4(130,164,166,0),24 }, + { IPv4(130,164,168,0),24 }, + { IPv4(130,164,175,0),24 }, + { IPv4(130,164,254,0),24 }, + { IPv4(130,166,0,0),16 }, + { IPv4(130,167,0,0),16 }, + { IPv4(130,182,0,0),16 }, + { IPv4(130,187,0,0),16 }, + { IPv4(130,191,0,0),16 }, + { IPv4(130,199,0,0),16 }, + { IPv4(130,202,0,0),16 }, + { IPv4(130,203,0,0),16 }, + { IPv4(130,205,0,0),16 }, + { IPv4(130,207,0,0),16 }, + { IPv4(130,212,0,0),16 }, + { IPv4(130,216,0,0),16 }, + { IPv4(130,218,0,0),16 }, + { IPv4(130,245,0,0),16 }, + { IPv4(130,253,0,0),16 }, + { IPv4(130,254,0,0),16 }, + { IPv4(131,2,0,0),15 }, + { IPv4(131,4,0,0),14 }, + { IPv4(131,8,0,0),13 }, + { IPv4(131,16,0,0),12 }, + { IPv4(131,32,0,0),11 }, + { IPv4(131,36,0,0),16 }, + { IPv4(131,38,0,0),16 }, + { IPv4(131,49,0,0),16 }, + { IPv4(131,64,0,0),12 }, + { IPv4(131,71,0,0),16 }, + { IPv4(131,80,0,0),14 }, + { IPv4(131,86,0,0),15 }, + { IPv4(131,86,1,0),24 }, + { IPv4(131,92,0,0),16 }, + { IPv4(131,96,0,0),16 }, + { IPv4(131,100,0,0),16 }, + { IPv4(131,103,0,0),16 }, + { IPv4(131,107,0,0),16 }, + { IPv4(131,110,0,0),16 }, + { IPv4(131,113,0,0),16 }, + { IPv4(131,120,0,0),16 }, + { IPv4(131,121,0,0),16 }, + { IPv4(131,122,0,0),16 }, + { IPv4(131,123,0,0),16 }, + { IPv4(131,124,96,0),19 }, + { IPv4(131,132,0,0),16 }, + { IPv4(131,135,0,0),16 }, + { IPv4(131,136,0,0),16 }, + { IPv4(131,137,0,0),16 }, + { IPv4(131,144,0,0),16 }, + { IPv4(131,148,0,0),16 }, + { IPv4(131,149,0,0),16 }, + { IPv4(131,151,0,0),16 }, + { IPv4(131,161,0,0),16 }, + { IPv4(131,161,54,0),24 }, + { IPv4(131,161,200,0),22 }, + { IPv4(131,161,200,0),21 }, + { IPv4(131,161,208,0),20 }, + { 
IPv4(131,161,217,0),24 }, + { IPv4(131,167,0,0),16 }, + { IPv4(131,178,0,0),16 }, + { IPv4(131,179,0,0),16 }, + { IPv4(131,182,0,0),16 }, + { IPv4(131,184,0,0),16 }, + { IPv4(131,184,146,0),24 }, + { IPv4(131,193,0,0),16 }, + { IPv4(131,197,66,0),24 }, + { IPv4(131,197,192,0),24 }, + { IPv4(131,197,196,0),24 }, + { IPv4(131,197,224,0),24 }, + { IPv4(131,197,228,0),24 }, + { IPv4(131,201,0,0),16 }, + { IPv4(131,203,0,0),16 }, + { IPv4(131,204,0,0),16 }, + { IPv4(131,212,0,0),16 }, + { IPv4(131,214,0,0),16 }, + { IPv4(131,218,0,0),16 }, + { IPv4(131,222,0,0),16 }, + { IPv4(131,225,0,0),16 }, + { IPv4(131,230,0,0),16 }, + { IPv4(131,230,224,0),20 }, + { IPv4(131,233,0,0),16 }, + { IPv4(131,239,0,0),16 }, + { IPv4(131,243,0,0),16 }, + { IPv4(131,244,0,0),15 }, + { IPv4(131,250,0,0),16 }, + { IPv4(132,0,0,0),10 }, + { IPv4(132,8,1,0),24 }, + { IPv4(132,15,0,0),16 }, + { IPv4(132,16,0,0),16 }, + { IPv4(132,20,0,0),16 }, + { IPv4(132,61,0,0),16 }, + { IPv4(132,79,0,0),16 }, + { IPv4(132,80,0,0),12 }, + { IPv4(132,96,0,0),11 }, + { IPv4(132,128,0,0),12 }, + { IPv4(132,146,0,0),16 }, + { IPv4(132,151,0,0),18 }, + { IPv4(132,151,0,0),16 }, + { IPv4(132,151,64,0),24 }, + { IPv4(132,156,0,0),16 }, + { IPv4(132,159,0,0),16 }, + { IPv4(132,161,0,0),16 }, + { IPv4(132,163,0,0),16 }, + { IPv4(132,175,0,0),16 }, + { IPv4(132,188,0,0),19 }, + { IPv4(132,189,0,0),16 }, + { IPv4(132,193,0,0),16 }, + { IPv4(132,194,0,0),16 }, + { IPv4(132,200,0,0),16 }, + { IPv4(132,221,0,0),16 }, + { IPv4(132,226,0,0),16 }, + { IPv4(132,228,0,0),16 }, + { IPv4(132,236,0,0),16 }, + { IPv4(132,237,0,0),16 }, + { IPv4(132,239,0,0),16 }, + { IPv4(132,240,0,0),16 }, + { IPv4(132,241,0,0),16 }, + { IPv4(132,247,0,0),16 }, + { IPv4(132,248,0,0),16 }, + { IPv4(132,249,0,0),16 }, + { IPv4(132,249,20,0),24 }, + { IPv4(132,249,30,0),24 }, + { IPv4(132,250,0,0),16 }, + { IPv4(132,254,0,0),16 }, + { IPv4(132,254,0,0),19 }, + { IPv4(132,254,48,0),21 }, + { IPv4(132,254,56,0),21 }, + { IPv4(132,254,72,0),21 }, + { IPv4(132,254,78,0),24 }, + { IPv4(132,254,80,0),21 }, + { IPv4(132,254,88,0),21 }, + { IPv4(132,254,96,0),21 }, + { IPv4(132,254,112,0),21 }, + { IPv4(132,254,120,0),21 }, + { IPv4(132,254,128,0),21 }, + { IPv4(132,254,144,0),21 }, + { IPv4(132,254,192,0),20 }, + { IPv4(132,254,208,0),21 }, + { IPv4(132,254,208,0),20 }, + { IPv4(132,254,216,0),21 }, + { IPv4(132,254,224,0),19 }, + { IPv4(132,254,232,0),24 }, + { IPv4(133,9,0,0),16 }, + { IPv4(133,12,0,0),16 }, + { IPv4(133,18,0,0),16 }, + { IPv4(133,27,0,0),16 }, + { IPv4(133,53,0,0),16 }, + { IPv4(133,54,0,0),16 }, + { IPv4(133,63,0,0),16 }, + { IPv4(133,69,0,0),16 }, + { IPv4(133,105,0,0),16 }, + { IPv4(133,121,0,0),16 }, + { IPv4(133,123,0,0),16 }, + { IPv4(133,126,0,0),16 }, + { IPv4(133,137,0,0),16 }, + { IPv4(133,138,0,0),16 }, + { IPv4(133,144,0,0),16 }, + { IPv4(133,145,0,0),16 }, + { IPv4(133,146,0,0),16 }, + { IPv4(133,170,0,0),16 }, + { IPv4(133,175,0,0),16 }, + { IPv4(133,186,0,0),16 }, + { IPv4(133,187,0,0),16 }, + { IPv4(133,188,0,0),16 }, + { IPv4(133,205,0,0),16 }, + { IPv4(133,217,0,0),16 }, + { IPv4(133,232,0,0),16 }, + { IPv4(133,235,0,0),16 }, + { IPv4(133,243,0,0),16 }, + { IPv4(133,250,0,0),16 }, + { IPv4(134,5,0,0),16 }, + { IPv4(134,8,5,0),24 }, + { IPv4(134,9,0,0),16 }, + { IPv4(134,10,0,0),16 }, + { IPv4(134,12,0,0),16 }, + { IPv4(134,13,0,0),16 }, + { IPv4(134,15,0,0),16 }, + { IPv4(134,17,0,0),16 }, + { IPv4(134,18,0,0),16 }, + { IPv4(134,20,0,0),16 }, + { IPv4(134,24,0,0),16 }, + { IPv4(134,24,10,0),24 }, + { IPv4(134,24,71,0),24 }, + { 
IPv4(134,24,92,0),24 }, + { IPv4(134,24,100,0),24 }, + { IPv4(134,24,123,0),24 }, + { IPv4(134,24,125,0),24 }, + { IPv4(134,24,153,0),24 }, + { IPv4(134,29,0,0),16 }, + { IPv4(134,43,10,0),23 }, + { IPv4(134,43,12,0),24 }, + { IPv4(134,43,61,0),24 }, + { IPv4(134,43,101,0),24 }, + { IPv4(134,49,0,0),16 }, + { IPv4(134,49,68,0),22 }, + { IPv4(134,49,72,0),22 }, + { IPv4(134,49,128,0),21 }, + { IPv4(134,49,136,0),21 }, + { IPv4(134,50,0,0),16 }, + { IPv4(134,54,0,0),16 }, + { IPv4(134,55,0,0),16 }, + { IPv4(134,57,0,0),16 }, + { IPv4(134,65,0,0),16 }, + { IPv4(134,66,0,0),16 }, + { IPv4(134,71,0,0),16 }, + { IPv4(134,74,0,0),16 }, + { IPv4(134,75,0,0),16 }, + { IPv4(134,75,7,0),24 }, + { IPv4(134,75,12,0),24 }, + { IPv4(134,75,18,0),24 }, + { IPv4(134,75,30,0),24 }, + { IPv4(134,75,50,0),24 }, + { IPv4(134,75,55,0),24 }, + { IPv4(134,75,122,0),24 }, + { IPv4(134,75,171,0),24 }, + { IPv4(134,75,172,0),24 }, + { IPv4(134,75,180,0),24 }, + { IPv4(134,75,196,0),24 }, + { IPv4(134,75,197,0),24 }, + { IPv4(134,75,217,0),24 }, + { IPv4(134,75,226,0),24 }, + { IPv4(134,78,0,0),16 }, + { IPv4(134,78,0,0),15 }, + { IPv4(134,79,0,0),16 }, + { IPv4(134,82,0,0),16 }, + { IPv4(134,84,0,0),16 }, + { IPv4(134,114,0,0),16 }, + { IPv4(134,120,0,0),16 }, + { IPv4(134,124,0,0),16 }, + { IPv4(134,125,0,0),16 }, + { IPv4(134,127,0,0),16 }, + { IPv4(134,128,160,0),22 }, + { IPv4(134,131,0,0),16 }, + { IPv4(134,136,0,0),16 }, + { IPv4(134,137,0,0),16 }, + { IPv4(134,139,0,0),16 }, + { IPv4(134,141,0,0),16 }, + { IPv4(134,141,242,0),24 }, + { IPv4(134,160,0,0),16 }, + { IPv4(134,161,0,0),16 }, + { IPv4(134,164,0,0),16 }, + { IPv4(134,167,0,0),16 }, + { IPv4(134,172,0,0),16 }, + { IPv4(134,180,0,0),16 }, + { IPv4(134,193,0,0),16 }, + { IPv4(134,194,0,0),16 }, + { IPv4(134,201,0,0),16 }, + { IPv4(134,207,0,0),16 }, + { IPv4(134,208,0,0),16 }, + { IPv4(134,217,0,0),16 }, + { IPv4(134,224,0,0),16 }, + { IPv4(134,229,0,0),16 }, + { IPv4(134,231,0,0),16 }, + { IPv4(134,233,0,0),16 }, + { IPv4(134,235,0,0),16 }, + { IPv4(134,238,0,0),16 }, + { IPv4(134,239,0,0),16 }, + { IPv4(134,240,0,0),16 }, + { IPv4(134,241,0,0),17 }, + { IPv4(134,241,0,0),16 }, + { IPv4(134,241,128,0),17 }, + { IPv4(134,247,0,0),16 }, + { IPv4(134,250,0,0),16 }, + { IPv4(134,252,0,0),16 }, + { IPv4(134,253,0,0),16 }, + { IPv4(135,53,0,0),16 }, + { IPv4(135,76,9,0),24 }, + { IPv4(135,118,6,0),24 }, + { IPv4(135,118,7,0),24 }, + { IPv4(135,118,8,0),24 }, + { IPv4(135,118,9,0),24 }, + { IPv4(135,120,254,0),24 }, + { IPv4(135,138,233,0),24 }, + { IPv4(135,145,0,0),16 }, + { IPv4(135,155,0,0),16 }, + { IPv4(135,197,0,0),16 }, + { IPv4(135,206,0,0),16 }, + { IPv4(135,209,0,0),18 }, + { IPv4(135,209,64,0),19 }, + { IPv4(135,209,96,0),19 }, + { IPv4(135,209,128,0),17 }, + { IPv4(135,214,0,0),16 }, + { IPv4(135,216,0,0),16 }, + { IPv4(135,218,0,0),16 }, + { IPv4(135,250,0,0),16 }, + { IPv4(136,1,0,0),16 }, + { IPv4(136,2,0,0),16 }, + { IPv4(136,141,0,0),16 }, + { IPv4(136,142,0,0),16 }, + { IPv4(136,149,0,0),16 }, + { IPv4(136,150,0,0),16 }, + { IPv4(136,150,2,0),24 }, + { IPv4(136,150,4,0),24 }, + { IPv4(136,150,40,0),24 }, + { IPv4(136,150,45,0),24 }, + { IPv4(136,150,46,0),24 }, + { IPv4(136,150,60,0),24 }, + { IPv4(136,150,100,0),24 }, + { IPv4(136,150,102,0),24 }, + { IPv4(136,150,103,0),24 }, + { IPv4(136,152,0,0),16 }, + { IPv4(136,154,0,0),16 }, + { IPv4(136,166,0,0),16 }, + { IPv4(136,167,0,0),16 }, + { IPv4(136,168,0,0),16 }, + { IPv4(136,175,0,0),16 }, + { IPv4(136,176,0,0),16 }, + { IPv4(136,177,0,0),16 }, + { IPv4(136,184,0,0),16 }, + { 
IPv4(136,204,0,0),16 }, + { IPv4(136,204,192,0),19 }, + { IPv4(136,204,224,0),22 }, + { IPv4(136,204,228,0),23 }, + { IPv4(136,205,0,0),16 }, + { IPv4(136,207,0,0),16 }, + { IPv4(136,209,0,0),16 }, + { IPv4(136,212,0,0),14 }, + { IPv4(136,216,0,0),13 }, + { IPv4(136,223,0,0),20 }, + { IPv4(136,223,16,0),24 }, + { IPv4(136,223,17,0),24 }, + { IPv4(136,223,18,0),24 }, + { IPv4(136,223,19,0),24 }, + { IPv4(136,223,32,0),24 }, + { IPv4(136,223,96,0),24 }, + { IPv4(136,223,97,0),24 }, + { IPv4(136,224,0,0),18 }, + { IPv4(136,224,0,0),16 }, + { IPv4(136,224,64,0),19 }, + { IPv4(136,224,96,0),19 }, + { IPv4(136,224,124,0),23 }, + { IPv4(136,224,128,0),20 }, + { IPv4(136,224,144,0),20 }, + { IPv4(136,224,160,0),20 }, + { IPv4(136,224,176,0),20 }, + { IPv4(136,224,192,0),19 }, + { IPv4(136,224,224,0),21 }, + { IPv4(136,224,232,0),21 }, + { IPv4(136,224,240,0),21 }, + { IPv4(136,224,248,0),21 }, + { IPv4(136,226,0,0),16 }, + { IPv4(136,229,0,0),16 }, + { IPv4(136,234,0,0),16 }, + { IPv4(136,237,0,0),16 }, + { IPv4(136,244,0,0),16 }, + { IPv4(136,244,0,0),19 }, + { IPv4(136,244,32,0),19 }, + { IPv4(136,244,64,0),19 }, + { IPv4(136,244,96,0),19 }, + { IPv4(136,244,128,0),17 }, + { IPv4(136,248,0,0),16 }, + { IPv4(137,0,0,0),13 }, + { IPv4(137,8,0,0),14 }, + { IPv4(137,14,0,0),16 }, + { IPv4(137,16,0,0),16 }, + { IPv4(137,21,0,0),16 }, + { IPv4(137,22,0,0),16 }, + { IPv4(137,24,0,0),16 }, + { IPv4(137,32,0,0),16 }, + { IPv4(137,33,0,0),16 }, + { IPv4(137,37,0,0),16 }, + { IPv4(137,38,0,0),16 }, + { IPv4(137,49,0,0),16 }, + { IPv4(137,65,0,0),16 }, + { IPv4(137,66,0,0),16 }, + { IPv4(137,67,0,0),16 }, + { IPv4(137,68,0,0),16 }, + { IPv4(137,70,0,0),16 }, + { IPv4(137,75,0,0),16 }, + { IPv4(137,77,0,0),16 }, + { IPv4(137,78,0,0),16 }, + { IPv4(137,79,0,0),16 }, + { IPv4(137,80,0,0),16 }, + { IPv4(137,95,0,0),16 }, + { IPv4(137,97,0,0),16 }, + { IPv4(137,103,0,0),16 }, + { IPv4(137,110,0,0),16 }, + { IPv4(137,118,192,0),22 }, + { IPv4(137,124,0,0),16 }, + { IPv4(137,125,0,0),16 }, + { IPv4(137,128,0,0),16 }, + { IPv4(137,131,0,0),16 }, + { IPv4(137,132,0,0),16 }, + { IPv4(137,139,0,0),16 }, + { IPv4(137,140,0,0),16 }, + { IPv4(137,141,0,0),16 }, + { IPv4(137,142,0,0),16 }, + { IPv4(137,143,0,0),16 }, + { IPv4(137,143,128,0),17 }, + { IPv4(137,145,0,0),16 }, + { IPv4(137,150,0,0),16 }, + { IPv4(137,151,0,0),16 }, + { IPv4(137,158,0,0),16 }, + { IPv4(137,159,0,0),16 }, + { IPv4(137,164,1,0),24 }, + { IPv4(137,164,2,0),24 }, + { IPv4(137,164,3,0),24 }, + { IPv4(137,164,4,0),24 }, + { IPv4(137,164,5,0),24 }, + { IPv4(137,164,6,0),24 }, + { IPv4(137,164,7,0),24 }, + { IPv4(137,164,8,0),24 }, + { IPv4(137,164,9,0),24 }, + { IPv4(137,164,10,0),24 }, + { IPv4(137,164,11,0),24 }, + { IPv4(137,164,12,0),24 }, + { IPv4(137,164,13,0),24 }, + { IPv4(137,164,14,0),24 }, + { IPv4(137,169,0,0),16 }, + { IPv4(137,169,80,0),24 }, + { IPv4(137,169,81,0),24 }, + { IPv4(137,169,144,0),20 }, + { IPv4(137,170,0,0),16 }, + { IPv4(137,190,0,0),16 }, + { IPv4(137,192,0,0),16 }, + { IPv4(137,209,0,0),16 }, + { IPv4(137,214,0,0),15 }, + { IPv4(137,227,0,0),16 }, + { IPv4(137,228,0,0),16 }, + { IPv4(137,230,0,0),16 }, + { IPv4(137,240,0,0),14 }, + { IPv4(137,244,0,0),16 }, + { IPv4(137,246,0,0),16 }, + { IPv4(137,247,0,0),16 }, + { IPv4(137,252,0,0),16 }, + { IPv4(138,5,0,0),16 }, + { IPv4(138,12,0,0),16 }, + { IPv4(138,13,0,0),16 }, + { IPv4(138,18,0,0),16 }, + { IPv4(138,18,144,0),24 }, + { IPv4(138,23,0,0),16 }, + { IPv4(138,27,0,0),16 }, + { IPv4(138,29,0,0),16 }, + { IPv4(138,32,32,0),20 }, + { IPv4(138,32,48,0),20 }, + { 
IPv4(138,39,0,0),16 }, + { IPv4(138,46,0,0),16 }, + { IPv4(138,50,0,0),16 }, + { IPv4(138,60,0,0),16 }, + { IPv4(138,67,0,0),16 }, + { IPv4(138,72,0,0),16 }, + { IPv4(138,84,0,0),16 }, + { IPv4(138,86,0,0),16 }, + { IPv4(138,87,0,0),16 }, + { IPv4(138,92,0,0),16 }, + { IPv4(138,101,0,0),16 }, + { IPv4(138,105,0,0),16 }, + { IPv4(138,107,0,0),16 }, + { IPv4(138,115,0,0),16 }, + { IPv4(138,116,0,0),16 }, + { IPv4(138,125,0,0),16 }, + { IPv4(138,127,0,0),16 }, + { IPv4(138,129,0,0),16 }, + { IPv4(138,132,0,0),16 }, + { IPv4(138,136,0,0),13 }, + { IPv4(138,144,0,0),12 }, + { IPv4(138,164,0,0),16 }, + { IPv4(138,164,0,0),14 }, + { IPv4(138,168,0,0),14 }, + { IPv4(138,168,0,0),16 }, + { IPv4(138,178,0,0),15 }, + { IPv4(138,180,0,0),14 }, + { IPv4(138,181,0,0),16 }, + { IPv4(138,183,0,0),17 }, + { IPv4(138,184,0,0),16 }, + { IPv4(138,189,0,0),16 }, + { IPv4(138,198,0,0),16 }, + { IPv4(138,226,0,0),16 }, + { IPv4(138,229,0,0),16 }, + { IPv4(138,230,0,0),16 }, + { IPv4(138,234,0,0),16 }, + { IPv4(139,2,0,0),16 }, + { IPv4(139,27,0,0),16 }, + { IPv4(139,47,0,0),16 }, + { IPv4(139,48,0,0),16 }, + { IPv4(139,53,0,0),16 }, + { IPv4(139,56,64,0),19 }, + { IPv4(139,62,0,0),16 }, + { IPv4(139,65,0,0),16 }, + { IPv4(139,67,0,0),16 }, + { IPv4(139,72,0,0),16 }, + { IPv4(139,87,0,0),16 }, + { IPv4(139,88,0,0),16 }, + { IPv4(139,92,0,0),16 }, + { IPv4(139,93,0,0),16 }, + { IPv4(139,131,128,0),18 }, + { IPv4(139,131,192,0),19 }, + { IPv4(139,139,0,0),16 }, + { IPv4(139,141,0,0),16 }, + { IPv4(139,144,0,0),16 }, + { IPv4(139,152,0,0),16 }, + { IPv4(139,161,0,0),16 }, + { IPv4(139,169,0,0),16 }, + { IPv4(139,171,0,0),19 }, + { IPv4(139,171,0,0),16 }, + { IPv4(139,171,24,0),21 }, + { IPv4(139,175,0,0),16 }, + { IPv4(139,175,12,0),23 }, + { IPv4(139,175,56,0),24 }, + { IPv4(139,175,57,0),24 }, + { IPv4(139,175,58,0),24 }, + { IPv4(139,175,59,0),24 }, + { IPv4(139,175,169,0),24 }, + { IPv4(139,175,192,0),18 }, + { IPv4(139,175,252,0),24 }, + { IPv4(139,180,0,0),16 }, + { IPv4(139,182,0,0),16 }, + { IPv4(139,223,0,0),17 }, + { IPv4(139,223,0,0),16 }, + { IPv4(139,223,0,0),22 }, + { IPv4(139,223,2,0),24 }, + { IPv4(139,223,4,0),22 }, + { IPv4(139,223,8,0),21 }, + { IPv4(139,223,16,0),20 }, + { IPv4(139,223,32,0),19 }, + { IPv4(139,223,64,0),18 }, + { IPv4(139,223,128,0),18 }, + { IPv4(139,223,160,0),20 }, + { IPv4(139,223,187,0),24 }, + { IPv4(139,223,188,0),24 }, + { IPv4(139,223,189,0),24 }, + { IPv4(139,223,190,0),24 }, + { IPv4(139,223,191,0),24 }, + { IPv4(139,223,192,0),19 }, + { IPv4(139,223,192,0),24 }, + { IPv4(139,223,193,0),24 }, + { IPv4(139,223,195,0),24 }, + { IPv4(139,223,196,0),24 }, + { IPv4(139,223,197,0),24 }, + { IPv4(139,223,198,0),24 }, + { IPv4(139,223,199,0),24 }, + { IPv4(139,223,200,0),24 }, + { IPv4(139,223,220,0),22 }, + { IPv4(139,223,224,0),19 }, + { IPv4(139,223,232,0),24 }, + { IPv4(139,231,17,0),24 }, + { IPv4(139,232,0,0),16 }, + { IPv4(140,31,0,0),18 }, + { IPv4(140,31,192,0),21 }, + { IPv4(140,32,0,0),16 }, + { IPv4(140,35,0,0),16 }, + { IPv4(140,45,0,0),16 }, + { IPv4(140,47,0,0),16 }, + { IPv4(140,88,0,0),16 }, + { IPv4(140,89,0,0),16 }, + { IPv4(140,92,0,0),16 }, + { IPv4(140,95,0,0),16 }, + { IPv4(140,95,9,0),24 }, + { IPv4(140,95,205,0),24 }, + { IPv4(140,95,224,0),24 }, + { IPv4(140,96,0,0),16 }, + { IPv4(140,99,0,0),16 }, + { IPv4(140,99,96,0),19 }, + { IPv4(140,100,0,0),17 }, + { IPv4(140,100,0,0),16 }, + { IPv4(140,100,4,0),24 }, + { IPv4(140,100,128,0),18 }, + { IPv4(140,100,192,0),18 }, + { IPv4(140,107,0,0),16 }, + { IPv4(140,109,0,0),16 }, + { IPv4(140,110,0,0),15 }, + 
{ IPv4(140,112,0,0),14 }, + { IPv4(140,112,0,0),16 }, + { IPv4(140,113,0,0),16 }, + { IPv4(140,114,0,0),15 }, + { IPv4(140,116,0,0),14 }, + { IPv4(140,120,0,0),14 }, + { IPv4(140,124,0,0),15 }, + { IPv4(140,126,0,0),16 }, + { IPv4(140,127,0,0),16 }, + { IPv4(140,128,0,0),13 }, + { IPv4(140,136,0,0),15 }, + { IPv4(140,138,0,0),16 }, + { IPv4(140,139,0,0),16 }, + { IPv4(140,139,28,64),27 }, + { IPv4(140,140,0,0),16 }, + { IPv4(140,144,0,0),16 }, + { IPv4(140,145,0,0),16 }, + { IPv4(140,148,0,0),16 }, + { IPv4(140,152,0,0),14 }, + { IPv4(140,153,5,0),25 }, + { IPv4(140,153,13,0),25 }, + { IPv4(140,153,18,0),25 }, + { IPv4(140,153,21,0),25 }, + { IPv4(140,153,99,0),25 }, + { IPv4(140,153,107,0),25 }, + { IPv4(140,153,189,0),27 }, + { IPv4(140,156,0,0),16 }, + { IPv4(140,157,38,0),23 }, + { IPv4(140,157,40,0),23 }, + { IPv4(140,157,42,0),23 }, + { IPv4(140,157,44,0),23 }, + { IPv4(140,157,48,0),23 }, + { IPv4(140,157,52,0),23 }, + { IPv4(140,163,0,0),16 }, + { IPv4(140,169,0,0),16 }, + { IPv4(140,172,0,0),16 }, + { IPv4(140,174,0,0),16 }, + { IPv4(140,174,85,0),24 }, + { IPv4(140,174,105,0),24 }, + { IPv4(140,174,208,0),24 }, + { IPv4(140,175,0,0),16 }, + { IPv4(140,176,0,0),16 }, + { IPv4(140,178,0,0),16 }, + { IPv4(140,180,0,0),16 }, + { IPv4(140,182,0,0),16 }, + { IPv4(140,183,0,0),16 }, + { IPv4(140,186,0,0),16 }, + { IPv4(140,186,46,0),24 }, + { IPv4(140,186,70,0),24 }, + { IPv4(140,186,96,0),24 }, + { IPv4(140,186,112,0),24 }, + { IPv4(140,186,129,0),24 }, + { IPv4(140,186,130,0),23 }, + { IPv4(140,186,132,0),23 }, + { IPv4(140,186,144,0),23 }, + { IPv4(140,186,160,0),22 }, + { IPv4(140,187,0,0),16 }, + { IPv4(140,192,0,0),16 }, + { IPv4(140,195,0,0),16 }, + { IPv4(140,196,0,0),16 }, + { IPv4(140,198,0,0),16 }, + { IPv4(140,201,0,0),16 }, + { IPv4(140,204,240,0),21 }, + { IPv4(140,209,0,0),16 }, + { IPv4(140,212,0,0),16 }, + { IPv4(140,212,200,0),22 }, + { IPv4(140,212,204,0),24 }, + { IPv4(140,212,205,0),24 }, + { IPv4(140,212,206,0),24 }, + { IPv4(140,214,0,0),15 }, + { IPv4(140,216,0,0),14 }, + { IPv4(140,221,0,0),16 }, + { IPv4(140,225,0,0),16 }, + { IPv4(140,226,0,0),16 }, + { IPv4(140,229,0,0),16 }, + { IPv4(140,233,0,0),16 }, + { IPv4(140,237,32,0),19 }, + { IPv4(140,239,0,0),16 }, + { IPv4(140,239,177,0),24 }, + { IPv4(140,239,214,0),24 }, + { IPv4(140,241,0,0),16 }, + { IPv4(140,251,0,0),16 }, + { IPv4(140,252,0,0),16 }, + { IPv4(141,92,0,0),16 }, + { IPv4(141,93,0,0),16 }, + { IPv4(141,102,0,0),16 }, + { IPv4(141,103,0,0),16 }, + { IPv4(141,111,0,0),16 }, + { IPv4(141,121,0,0),16 }, + { IPv4(141,122,0,0),16 }, + { IPv4(141,129,0,0),16 }, + { IPv4(141,140,0,0),16 }, + { IPv4(141,141,0,0),16 }, + { IPv4(141,142,0,0),16 }, + { IPv4(141,160,0,0),16 }, + { IPv4(141,164,0,0),16 }, + { IPv4(141,165,0,0),16 }, + { IPv4(141,173,0,0),16 }, + { IPv4(141,176,0,0),16 }, + { IPv4(141,178,0,0),16 }, + { IPv4(141,179,0,0),16 }, + { IPv4(141,183,0,0),16 }, + { IPv4(141,184,0,0),16 }, + { IPv4(141,187,0,0),16 }, + { IPv4(141,188,0,0),16 }, + { IPv4(141,189,0,0),16 }, + { IPv4(141,190,0,0),16 }, + { IPv4(141,197,4,0),23 }, + { IPv4(141,197,8,0),23 }, + { IPv4(141,198,0,0),16 }, + { IPv4(141,204,0,0),16 }, + { IPv4(141,205,0,0),16 }, + { IPv4(141,221,0,0),16 }, + { IPv4(141,222,0,0),16 }, + { IPv4(141,223,0,0),18 }, + { IPv4(141,223,0,0),16 }, + { IPv4(141,223,64,0),18 }, + { IPv4(141,223,128,0),18 }, + { IPv4(141,223,192,0),18 }, + { IPv4(141,224,0,0),16 }, + { IPv4(141,234,0,0),15 }, + { IPv4(141,236,0,0),16 }, + { IPv4(141,238,0,0),16 }, + { IPv4(141,238,64,0),20 }, + { IPv4(141,238,80,0),20 }, 
+ { IPv4(141,238,96,0),19 }, + { IPv4(141,240,0,0),16 }, + { IPv4(141,242,0,0),16 }, + { IPv4(141,246,0,0),16 }, + { IPv4(141,248,0,0),16 }, + { IPv4(141,254,0,0),16 }, + { IPv4(142,21,0,0),16 }, + { IPv4(142,42,0,0),16 }, + { IPv4(142,42,242,0),24 }, + { IPv4(142,44,0,0),16 }, + { IPv4(142,51,0,0),16 }, + { IPv4(142,66,31,0),24 }, + { IPv4(142,78,0,0),16 }, + { IPv4(142,79,0,0),16 }, + { IPv4(142,89,0,0),16 }, + { IPv4(142,130,0,0),16 }, + { IPv4(142,144,0,0),16 }, + { IPv4(142,146,0,0),16 }, + { IPv4(142,146,41,0),24 }, + { IPv4(142,146,42,0),24 }, + { IPv4(142,146,246,0),24 }, + { IPv4(142,146,247,0),24 }, + { IPv4(142,146,248,0),24 }, + { IPv4(142,146,253,0),24 }, + { IPv4(142,147,0,0),16 }, + { IPv4(142,154,0,0),16 }, + { IPv4(142,154,224,0),19 }, + { IPv4(142,158,0,0),16 }, + { IPv4(142,192,200,0),24 }, + { IPv4(142,194,0,0),16 }, + { IPv4(142,194,32,0),19 }, + { IPv4(142,194,96,0),19 }, + { IPv4(142,194,128,0),19 }, + { IPv4(142,194,160,0),19 }, + { IPv4(142,194,192,0),19 }, + { IPv4(142,194,224,0),19 }, + { IPv4(142,201,0,0),16 }, + { IPv4(142,205,0,0),16 }, + { IPv4(142,205,54,0),23 }, + { IPv4(142,205,60,0),23 }, + { IPv4(142,205,232,0),23 }, + { IPv4(142,205,240,0),23 }, + { IPv4(142,205,248,0),23 }, + { IPv4(142,206,0,0),16 }, + { IPv4(142,238,0,0),16 }, + { IPv4(142,245,0,0),16 }, + { IPv4(142,245,0,0),19 }, + { IPv4(142,245,192,0),22 }, + { IPv4(143,43,0,0),16 }, + { IPv4(143,43,112,0),20 }, + { IPv4(143,43,192,0),18 }, + { IPv4(143,45,0,0),16 }, + { IPv4(143,46,0,0),16 }, + { IPv4(143,48,0,0),16 }, + { IPv4(143,56,0,0),16 }, + { IPv4(143,58,0,0),16 }, + { IPv4(143,58,0,0),19 }, + { IPv4(143,58,32,0),22 }, + { IPv4(143,58,36,0),22 }, + { IPv4(143,58,40,0),22 }, + { IPv4(143,58,100,0),22 }, + { IPv4(143,58,104,0),22 }, + { IPv4(143,58,164,0),22 }, + { IPv4(143,58,168,0),22 }, + { IPv4(143,58,172,0),22 }, + { IPv4(143,58,176,0),22 }, + { IPv4(143,58,180,0),22 }, + { IPv4(143,58,184,0),22 }, + { IPv4(143,58,245,0),24 }, + { IPv4(143,58,246,0),24 }, + { IPv4(143,61,0,0),16 }, + { IPv4(143,61,38,0),24 }, + { IPv4(143,61,153,0),24 }, + { IPv4(143,61,154,0),24 }, + { IPv4(143,61,156,0),24 }, + { IPv4(143,61,233,0),24 }, + { IPv4(143,62,0,0),16 }, + { IPv4(143,66,0,0),16 }, + { IPv4(143,67,0,0),16 }, + { IPv4(143,68,0,0),16 }, + { IPv4(143,77,0,0),16 }, + { IPv4(143,78,0,0),16 }, + { IPv4(143,81,0,0),16 }, + { IPv4(143,83,0,0),16 }, + { IPv4(143,85,0,0),16 }, + { IPv4(143,85,107,0),25 }, + { IPv4(143,96,0,0),16 }, + { IPv4(143,100,0,0),16 }, + { IPv4(143,104,0,0),16 }, + { IPv4(143,110,0,0),16 }, + { IPv4(143,111,0,0),16 }, + { IPv4(143,113,0,0),16 }, + { IPv4(143,115,0,0),16 }, + { IPv4(143,115,160,0),19 }, + { IPv4(143,116,0,0),16 }, + { IPv4(143,119,0,0),16 }, + { IPv4(143,127,0,0),19 }, + { IPv4(143,128,0,0),16 }, + { IPv4(143,134,0,0),16 }, + { IPv4(143,138,0,0),16 }, + { IPv4(143,138,0,0),15 }, + { IPv4(143,152,0,0),14 }, + { IPv4(143,158,0,0),16 }, + { IPv4(143,160,0,0),16 }, + { IPv4(143,164,96,0),22 }, + { IPv4(143,166,0,0),16 }, + { IPv4(143,176,0,0),14 }, + { IPv4(143,187,0,0),16 }, + { IPv4(143,191,0,0),16 }, + { IPv4(143,192,0,0),16 }, + { IPv4(143,195,0,0),16 }, + { IPv4(143,197,0,0),16 }, + { IPv4(143,211,0,0),16 }, + { IPv4(143,212,0,0),15 }, + { IPv4(143,214,0,0),16 }, + { IPv4(143,217,0,0),16 }, + { IPv4(143,223,20,0),24 }, + { IPv4(143,226,0,0),16 }, + { IPv4(143,227,0,0),16 }, + { IPv4(143,227,48,0),20 }, + { IPv4(143,229,0,0),16 }, + { IPv4(143,230,0,0),16 }, + { IPv4(143,232,0,0),16 }, + { IPv4(143,243,0,0),16 }, + { IPv4(143,244,0,0),16 }, + { 
IPv4(143,245,0,0),16 }, + { IPv4(143,247,0,0),16 }, + { IPv4(143,248,0,0),16 }, + { IPv4(143,249,0,0),16 }, + { IPv4(143,250,0,0),16 }, + { IPv4(144,3,0,0),16 }, + { IPv4(144,11,0,0),16 }, + { IPv4(144,15,0,0),16 }, + { IPv4(144,15,249,0),24 }, + { IPv4(144,15,252,0),24 }, + { IPv4(144,17,0,0),16 }, + { IPv4(144,18,0,0),16 }, + { IPv4(144,34,0,0),16 }, + { IPv4(144,35,0,0),16 }, + { IPv4(144,37,0,0),16 }, + { IPv4(144,38,0,0),16 }, + { IPv4(144,39,0,0),16 }, + { IPv4(144,45,0,0),16 }, + { IPv4(144,47,0,0),16 }, + { IPv4(144,49,0,0),16 }, + { IPv4(144,49,1,0),24 }, + { IPv4(144,49,2,0),24 }, + { IPv4(144,49,8,0),24 }, + { IPv4(144,58,0,0),16 }, + { IPv4(144,59,0,0),16 }, + { IPv4(144,73,0,0),16 }, + { IPv4(144,74,0,0),16 }, + { IPv4(144,80,14,0),23 }, + { IPv4(144,80,60,0),22 }, + { IPv4(144,80,92,0),22 }, + { IPv4(144,81,0,0),16 }, + { IPv4(144,86,0,0),16 }, + { IPv4(144,90,0,0),16 }, + { IPv4(144,95,0,0),16 }, + { IPv4(144,99,0,0),16 }, + { IPv4(144,100,0,0),14 }, + { IPv4(144,104,0,0),14 }, + { IPv4(144,109,0,0),16 }, + { IPv4(144,119,0,0),16 }, + { IPv4(144,141,0,0),16 }, + { IPv4(144,147,0,0),16 }, + { IPv4(144,169,0,0),16 }, + { IPv4(144,170,0,0),16 }, + { IPv4(144,182,0,0),15 }, + { IPv4(144,183,200,0),21 }, + { IPv4(144,183,208,0),21 }, + { IPv4(144,183,208,0),24 }, + { IPv4(144,184,0,0),16 }, + { IPv4(144,197,0,0),16 }, + { IPv4(144,198,0,0),16 }, + { IPv4(144,198,20,0),24 }, + { IPv4(144,198,24,0),22 }, + { IPv4(144,198,32,0),19 }, + { IPv4(144,198,70,0),24 }, + { IPv4(144,198,191,0),24 }, + { IPv4(144,198,192,0),24 }, + { IPv4(144,198,200,0),24 }, + { IPv4(144,198,207,0),24 }, + { IPv4(144,198,226,0),24 }, + { IPv4(144,199,0,0),16 }, + { IPv4(144,207,0,0),16 }, + { IPv4(144,213,0,0),16 }, + { IPv4(144,244,0,0),16 }, + { IPv4(144,245,0,0),16 }, + { IPv4(144,246,0,0),16 }, + { IPv4(144,247,0,0),16 }, + { IPv4(144,247,216,0),21 }, + { IPv4(144,247,224,0),21 }, + { IPv4(144,247,240,0),20 }, + { IPv4(144,251,0,0),16 }, + { IPv4(144,252,0,0),16 }, + { IPv4(144,254,0,0),16 }, + { IPv4(145,7,0,0),16 }, + { IPv4(145,8,0,0),16 }, + { IPv4(145,10,0,0),16 }, + { IPv4(145,53,0,0),16 }, + { IPv4(145,61,0,0),16 }, + { IPv4(145,63,0,0),16 }, + { IPv4(145,66,0,0),16 }, + { IPv4(145,69,0,0),16 }, + { IPv4(145,77,103,0),24 }, + { IPv4(145,224,0,0),16 }, + { IPv4(145,224,255,0),24 }, + { IPv4(145,225,203,0),24 }, + { IPv4(145,225,204,0),24 }, + { IPv4(145,229,0,0),16 }, + { IPv4(145,232,0,0),16 }, + { IPv4(146,1,8,0),21 }, + { IPv4(146,5,0,0),16 }, + { IPv4(146,6,0,0),16 }, + { IPv4(146,7,0,0),16 }, + { IPv4(146,18,0,0),16 }, + { IPv4(146,20,18,0),23 }, + { IPv4(146,20,23,0),24 }, + { IPv4(146,20,33,0),24 }, + { IPv4(146,20,34,0),24 }, + { IPv4(146,53,0,0),16 }, + { IPv4(146,57,0,0),16 }, + { IPv4(146,58,0,0),16 }, + { IPv4(146,64,0,0),16 }, + { IPv4(146,68,0,0),16 }, + { IPv4(146,79,0,0),16 }, + { IPv4(146,83,132,0),24 }, + { IPv4(146,83,135,0),24 }, + { IPv4(146,83,149,0),24 }, + { IPv4(146,83,164,0),24 }, + { IPv4(146,84,0,0),16 }, + { IPv4(146,86,0,0),16 }, + { IPv4(146,94,0,0),16 }, + { IPv4(146,95,0,0),16 }, + { IPv4(146,96,0,0),16 }, + { IPv4(146,99,0,0),16 }, + { IPv4(146,111,0,0),16 }, + { IPv4(146,115,0,0),16 }, + { IPv4(146,122,0,0),16 }, + { IPv4(146,126,0,0),16 }, + { IPv4(146,126,2,0),24 }, + { IPv4(146,126,51,0),24 }, + { IPv4(146,126,61,0),24 }, + { IPv4(146,126,73,0),24 }, + { IPv4(146,126,86,0),24 }, + { IPv4(146,126,88,0),24 }, + { IPv4(146,132,0,0),16 }, + { IPv4(146,137,0,0),16 }, + { IPv4(146,139,0,0),16 }, + { IPv4(146,141,0,0),16 }, + { IPv4(146,145,153,0),24 }, + { 
IPv4(146,150,0,0),16 }, + { IPv4(146,152,0,0),16 }, + { IPv4(146,153,0,0),16 }, + { IPv4(146,154,0,0),16 }, + { IPv4(146,155,0,0),16 }, + { IPv4(146,157,0,0),16 }, + { IPv4(146,163,0,0),16 }, + { IPv4(146,165,0,0),16 }, + { IPv4(146,167,0,0),16 }, + { IPv4(146,168,14,0),24 }, + { IPv4(146,174,0,0),16 }, + { IPv4(146,181,0,0),16 }, + { IPv4(146,182,0,0),16 }, + { IPv4(146,186,0,0),16 }, + { IPv4(146,196,0,0),16 }, + { IPv4(146,197,0,0),16 }, + { IPv4(146,202,0,0),16 }, + { IPv4(146,203,0,0),16 }, + { IPv4(146,206,0,0),16 }, + { IPv4(146,208,0,0),16 }, + { IPv4(146,209,160,0),19 }, + { IPv4(146,215,64,0),24 }, + { IPv4(146,215,65,0),24 }, + { IPv4(146,215,66,0),24 }, + { IPv4(146,217,0,0),16 }, + { IPv4(146,218,0,0),16 }, + { IPv4(146,220,224,0),20 }, + { IPv4(146,222,13,0),24 }, + { IPv4(146,222,14,0),24 }, + { IPv4(146,222,30,0),24 }, + { IPv4(146,222,31,0),24 }, + { IPv4(146,222,32,0),24 }, + { IPv4(146,222,33,0),24 }, + { IPv4(146,222,34,0),24 }, + { IPv4(146,222,45,0),24 }, + { IPv4(146,222,69,0),24 }, + { IPv4(146,222,156,0),23 }, + { IPv4(146,222,158,0),24 }, + { IPv4(146,222,187,0),24 }, + { IPv4(146,222,188,0),24 }, + { IPv4(146,222,194,0),24 }, + { IPv4(146,222,196,0),24 }, + { IPv4(146,222,197,0),24 }, + { IPv4(146,223,0,0),16 }, + { IPv4(146,230,0,0),16 }, + { IPv4(146,231,0,0),16 }, + { IPv4(146,232,0,0),16 }, + { IPv4(146,235,0,0),18 }, + { IPv4(146,235,64,0),18 }, + { IPv4(146,235,128,0),18 }, + { IPv4(146,244,0,0),16 }, + { IPv4(146,245,0,0),16 }, + { IPv4(146,246,0,0),16 }, + { IPv4(147,2,0,0),16 }, + { IPv4(147,4,0,0),16 }, + { IPv4(147,4,101,0),24 }, + { IPv4(147,6,0,0),16 }, + { IPv4(147,9,0,0),16 }, + { IPv4(147,16,0,0),16 }, + { IPv4(147,17,0,0),16 }, + { IPv4(147,21,0,0),16 }, + { IPv4(147,24,0,0),16 }, + { IPv4(147,25,0,0),16 }, + { IPv4(147,28,0,0),16 }, + { IPv4(147,35,0,0),16 }, + { IPv4(147,37,0,0),16 }, + { IPv4(147,39,0,0),16 }, + { IPv4(147,40,0,0),16 }, + { IPv4(147,43,0,0),16 }, + { IPv4(147,46,0,0),16 }, + { IPv4(147,51,0,0),16 }, + { IPv4(147,58,0,0),16 }, + { IPv4(147,71,0,0),16 }, + { IPv4(147,72,0,0),16 }, + { IPv4(147,72,64,0),18 }, + { IPv4(147,74,0,0),16 }, + { IPv4(147,78,0,0),16 }, + { IPv4(147,80,0,0),16 }, + { IPv4(147,92,0,0),20 }, + { IPv4(147,92,240,0),20 }, + { IPv4(147,103,0,0),16 }, + { IPv4(147,106,0,0),16 }, + { IPv4(147,118,0,0),16 }, + { IPv4(147,120,0,0),16 }, + { IPv4(147,128,68,0),22 }, + { IPv4(147,129,0,0),16 }, + { IPv4(147,130,0,0),15 }, + { IPv4(147,135,0,0),16 }, + { IPv4(147,137,0,0),16 }, + { IPv4(147,144,0,0),16 }, + { IPv4(147,147,0,0),16 }, + { IPv4(147,148,0,0),14 }, + { IPv4(147,153,0,0),16 }, + { IPv4(147,154,0,0),16 }, + { IPv4(147,155,0,0),16 }, + { IPv4(147,164,0,0),16 }, + { IPv4(147,166,0,0),16 }, + { IPv4(147,169,0,0),16 }, + { IPv4(147,178,0,0),16 }, + { IPv4(147,179,0,0),16 }, + { IPv4(147,182,0,0),16 }, + { IPv4(147,191,0,0),16 }, + { IPv4(147,198,0,0),16 }, + { IPv4(147,202,0,0),16 }, + { IPv4(147,202,60,0),24 }, + { IPv4(147,205,0,0),16 }, + { IPv4(147,208,0,0),19 }, + { IPv4(147,208,0,0),16 }, + { IPv4(147,208,128,0),18 }, + { IPv4(147,208,224,0),19 }, + { IPv4(147,216,0,0),15 }, + { IPv4(147,221,0,0),16 }, + { IPv4(147,222,0,0),16 }, + { IPv4(147,227,100,0),24 }, + { IPv4(147,235,0,0),16 }, + { IPv4(147,235,0,0),17 }, + { IPv4(147,235,128,0),19 }, + { IPv4(147,235,192,0),19 }, + { IPv4(147,235,224,0),22 }, + { IPv4(147,235,248,0),21 }, + { IPv4(147,237,232,0),24 }, + { IPv4(147,238,0,0),16 }, + { IPv4(147,239,0,0),16 }, + { IPv4(147,240,0,0),16 }, + { IPv4(147,241,0,0),16 }, + { IPv4(147,242,0,0),16 }, + { 
IPv4(147,248,0,0),16 }, + { IPv4(147,249,0,0),16 }, + { IPv4(148,5,0,0),16 }, + { IPv4(148,16,0,0),12 }, + { IPv4(148,55,0,0),16 }, + { IPv4(148,56,0,0),16 }, + { IPv4(148,59,0,0),16 }, + { IPv4(148,70,0,0),16 }, + { IPv4(148,71,0,0),16 }, + { IPv4(148,74,0,0),16 }, + { IPv4(148,75,0,0),16 }, + { IPv4(148,76,0,0),16 }, + { IPv4(148,77,0,0),16 }, + { IPv4(148,78,250,0),24 }, + { IPv4(148,78,251,0),24 }, + { IPv4(148,78,252,0),24 }, + { IPv4(148,78,253,0),24 }, + { IPv4(148,78,254,0),24 }, + { IPv4(148,84,0,0),16 }, + { IPv4(148,87,0,0),19 }, + { IPv4(148,89,252,0),24 }, + { IPv4(148,89,253,0),24 }, + { IPv4(148,89,254,0),24 }, + { IPv4(148,100,0,0),16 }, + { IPv4(148,107,0,0),16 }, + { IPv4(148,107,0,0),19 }, + { IPv4(148,107,1,0),24 }, + { IPv4(148,107,3,0),24 }, + { IPv4(148,107,4,0),24 }, + { IPv4(148,107,5,0),24 }, + { IPv4(148,107,6,0),24 }, + { IPv4(148,107,7,0),24 }, + { IPv4(148,107,8,0),24 }, + { IPv4(148,107,9,0),24 }, + { IPv4(148,107,10,0),24 }, + { IPv4(148,107,11,0),24 }, + { IPv4(148,107,12,0),24 }, + { IPv4(148,107,13,0),24 }, + { IPv4(148,107,14,0),24 }, + { IPv4(148,114,0,0),16 }, + { IPv4(148,115,0,0),16 }, + { IPv4(148,116,0,0),16 }, + { IPv4(148,126,0,0),16 }, + { IPv4(148,133,0,0),16 }, + { IPv4(148,141,0,0),16 }, + { IPv4(148,142,0,0),16 }, + { IPv4(148,146,0,0),16 }, + { IPv4(148,154,0,0),16 }, + { IPv4(148,163,0,0),16 }, + { IPv4(148,163,108,0),24 }, + { IPv4(148,165,0,0),16 }, + { IPv4(148,167,0,0),16 }, + { IPv4(148,168,0,0),16 }, + { IPv4(148,168,32,0),19 }, + { IPv4(148,168,96,0),19 }, + { IPv4(148,176,0,0),16 }, + { IPv4(148,176,248,0),24 }, + { IPv4(148,177,0,0),21 }, + { IPv4(148,177,0,0),16 }, + { IPv4(148,177,8,0),21 }, + { IPv4(148,177,128,0),21 }, + { IPv4(148,183,0,0),16 }, + { IPv4(148,199,0,0),16 }, + { IPv4(148,201,0,0),16 }, + { IPv4(148,203,196,0),24 }, + { IPv4(148,205,0,0),16 }, + { IPv4(148,208,128,0),17 }, + { IPv4(148,208,130,0),24 }, + { IPv4(148,208,131,0),24 }, + { IPv4(148,208,132,0),24 }, + { IPv4(148,208,134,0),24 }, + { IPv4(148,208,135,0),24 }, + { IPv4(148,208,137,0),24 }, + { IPv4(148,208,138,0),24 }, + { IPv4(148,208,140,0),24 }, + { IPv4(148,208,141,0),24 }, + { IPv4(148,208,143,0),24 }, + { IPv4(148,208,144,0),24 }, + { IPv4(148,208,145,0),24 }, + { IPv4(148,208,146,0),24 }, + { IPv4(148,208,147,0),24 }, + { IPv4(148,208,148,0),24 }, + { IPv4(148,208,149,0),24 }, + { IPv4(148,208,150,0),24 }, + { IPv4(148,208,151,0),24 }, + { IPv4(148,208,152,0),24 }, + { IPv4(148,208,153,0),24 }, + { IPv4(148,208,154,0),24 }, + { IPv4(148,208,155,0),24 }, + { IPv4(148,208,156,0),24 }, + { IPv4(148,208,157,0),24 }, + { IPv4(148,208,159,0),24 }, + { IPv4(148,208,161,0),24 }, + { IPv4(148,208,162,0),24 }, + { IPv4(148,208,163,0),24 }, + { IPv4(148,208,164,0),24 }, + { IPv4(148,208,165,0),24 }, + { IPv4(148,208,166,0),24 }, + { IPv4(148,208,167,0),24 }, + { IPv4(148,208,168,0),24 }, + { IPv4(148,208,169,0),24 }, + { IPv4(148,208,170,0),24 }, + { IPv4(148,208,171,0),24 }, + { IPv4(148,208,172,0),24 }, + { IPv4(148,208,174,0),24 }, + { IPv4(148,208,175,0),24 }, + { IPv4(148,208,176,0),24 }, + { IPv4(148,208,177,0),24 }, + { IPv4(148,208,178,0),24 }, + { IPv4(148,208,179,0),24 }, + { IPv4(148,208,180,0),24 }, + { IPv4(148,208,181,0),24 }, + { IPv4(148,208,182,0),24 }, + { IPv4(148,208,183,0),24 }, + { IPv4(148,208,184,0),24 }, + { IPv4(148,208,185,0),24 }, + { IPv4(148,208,186,0),24 }, + { IPv4(148,208,187,0),24 }, + { IPv4(148,208,188,0),24 }, + { IPv4(148,208,189,0),24 }, + { IPv4(148,208,190,0),24 }, + { IPv4(148,208,191,0),24 }, + { 
IPv4(148,208,192,0),24 }, + { IPv4(148,208,194,0),24 }, + { IPv4(148,208,195,0),24 }, + { IPv4(148,208,196,0),24 }, + { IPv4(148,208,198,0),24 }, + { IPv4(148,208,199,0),24 }, + { IPv4(148,208,200,0),24 }, + { IPv4(148,208,204,0),24 }, + { IPv4(148,208,205,0),24 }, + { IPv4(148,208,206,0),24 }, + { IPv4(148,208,207,0),24 }, + { IPv4(148,208,210,0),24 }, + { IPv4(148,208,214,0),24 }, + { IPv4(148,208,215,0),24 }, + { IPv4(148,208,216,0),24 }, + { IPv4(148,208,217,0),24 }, + { IPv4(148,208,218,0),24 }, + { IPv4(148,208,219,0),24 }, + { IPv4(148,208,220,0),24 }, + { IPv4(148,208,221,0),24 }, + { IPv4(148,208,222,0),24 }, + { IPv4(148,208,223,0),24 }, + { IPv4(148,208,224,0),24 }, + { IPv4(148,208,227,0),24 }, + { IPv4(148,208,228,0),24 }, + { IPv4(148,208,229,0),24 }, + { IPv4(148,208,230,0),24 }, + { IPv4(148,208,231,0),24 }, + { IPv4(148,208,232,0),24 }, + { IPv4(148,208,234,0),24 }, + { IPv4(148,208,236,0),24 }, + { IPv4(148,208,237,0),24 }, + { IPv4(148,208,238,0),24 }, + { IPv4(148,208,239,0),24 }, + { IPv4(148,208,240,0),24 }, + { IPv4(148,208,241,0),24 }, + { IPv4(148,208,242,0),24 }, + { IPv4(148,208,243,0),24 }, + { IPv4(148,208,246,0),24 }, + { IPv4(148,208,247,0),24 }, + { IPv4(148,208,248,0),24 }, + { IPv4(148,208,250,0),24 }, + { IPv4(148,208,251,0),24 }, + { IPv4(148,208,252,0),24 }, + { IPv4(148,208,254,0),24 }, + { IPv4(148,209,0,0),16 }, + { IPv4(148,210,0,0),16 }, + { IPv4(148,211,0,0),16 }, + { IPv4(148,214,0,0),16 }, + { IPv4(148,215,0,0),16 }, + { IPv4(148,216,0,0),16 }, + { IPv4(148,218,0,0),16 }, + { IPv4(148,219,0,0),16 }, + { IPv4(148,220,0,0),16 }, + { IPv4(148,221,0,0),19 }, + { IPv4(148,221,0,0),16 }, + { IPv4(148,221,32,0),19 }, + { IPv4(148,221,64,0),19 }, + { IPv4(148,221,96,0),19 }, + { IPv4(148,221,128,0),18 }, + { IPv4(148,221,192,0),18 }, + { IPv4(148,222,0,0),16 }, + { IPv4(148,223,0,0),18 }, + { IPv4(148,223,0,0),16 }, + { IPv4(148,223,64,0),19 }, + { IPv4(148,223,96,0),20 }, + { IPv4(148,223,112,0),20 }, + { IPv4(148,223,128,0),18 }, + { IPv4(148,223,152,0),24 }, + { IPv4(148,223,154,0),24 }, + { IPv4(148,223,192,0),19 }, + { IPv4(148,223,224,0),19 }, + { IPv4(148,224,6,0),24 }, + { IPv4(148,227,0,0),16 }, + { IPv4(148,230,0,0),16 }, + { IPv4(148,233,0,0),16 }, + { IPv4(148,233,0,0),19 }, + { IPv4(148,233,32,0),19 }, + { IPv4(148,233,64,0),19 }, + { IPv4(148,233,71,0),24 }, + { IPv4(148,233,77,0),24 }, + { IPv4(148,233,96,0),19 }, + { IPv4(148,233,128,0),18 }, + { IPv4(148,233,148,0),24 }, + { IPv4(148,233,152,0),24 }, + { IPv4(148,233,192,0),18 }, + { IPv4(148,233,241,0),24 }, + { IPv4(148,234,0,0),16 }, + { IPv4(148,235,0,0),16 }, + { IPv4(148,235,0,0),19 }, + { IPv4(148,235,32,0),19 }, + { IPv4(148,235,64,0),19 }, + { IPv4(148,235,96,0),19 }, + { IPv4(148,235,128,0),18 }, + { IPv4(148,235,192,0),18 }, + { IPv4(148,236,0,0),16 }, + { IPv4(148,237,0,0),16 }, + { IPv4(148,238,0,0),16 }, + { IPv4(148,239,0,0),16 }, + { IPv4(148,241,0,0),19 }, + { IPv4(148,241,32,0),19 }, + { IPv4(148,241,64,0),19 }, + { IPv4(148,242,0,0),16 }, + { IPv4(148,243,64,0),21 }, + { IPv4(148,244,0,0),17 }, + { IPv4(148,244,0,0),16 }, + { IPv4(148,244,0,0),18 }, + { IPv4(148,244,128,0),17 }, + { IPv4(148,245,228,0),24 }, + { IPv4(148,246,0,0),16 }, + { IPv4(148,248,0,0),16 }, + { IPv4(148,248,250,0),24 }, + { IPv4(148,249,0,0),16 }, + { IPv4(149,1,0,0),16 }, + { IPv4(149,2,22,0),24 }, + { IPv4(149,2,24,0),23 }, + { IPv4(149,2,28,0),24 }, + { IPv4(149,2,32,0),21 }, + { IPv4(149,2,78,0),24 }, + { IPv4(149,2,80,0),24 }, + { IPv4(149,2,121,0),24 }, + { IPv4(149,2,122,0),24 }, + { 
IPv4(149,2,123,0),24 }, + { IPv4(149,2,132,0),24 }, + { IPv4(149,2,143,0),24 }, + { IPv4(149,4,0,0),16 }, + { IPv4(149,15,0,0),16 }, + { IPv4(149,28,0,0),16 }, + { IPv4(149,28,0,0),20 }, + { IPv4(149,31,0,0),16 }, + { IPv4(149,43,0,0),16 }, + { IPv4(149,46,0,0),24 }, + { IPv4(149,48,0,0),16 }, + { IPv4(149,54,0,0),16 }, + { IPv4(149,58,0,0),16 }, + { IPv4(149,61,0,0),16 }, + { IPv4(149,63,0,0),16 }, + { IPv4(149,64,0,0),16 }, + { IPv4(149,65,0,0),16 }, + { IPv4(149,68,0,0),16 }, + { IPv4(149,70,0,0),16 }, + { IPv4(149,81,0,0),16 }, + { IPv4(149,83,208,0),24 }, + { IPv4(149,84,0,0),16 }, + { IPv4(149,89,0,0),16 }, + { IPv4(149,105,0,0),16 }, + { IPv4(149,114,0,0),16 }, + { IPv4(149,119,0,0),16 }, + { IPv4(149,123,0,0),16 }, + { IPv4(149,123,254,0),24 }, + { IPv4(149,125,0,0),16 }, + { IPv4(149,134,0,0),16 }, + { IPv4(149,137,0,0),16 }, + { IPv4(149,142,0,0),16 }, + { IPv4(149,145,0,0),16 }, + { IPv4(149,158,0,0),16 }, + { IPv4(149,159,0,0),16 }, + { IPv4(149,169,0,0),16 }, + { IPv4(149,172,0,0),17 }, + { IPv4(149,172,0,0),16 }, + { IPv4(149,172,128,0),17 }, + { IPv4(149,172,150,0),24 }, + { IPv4(149,173,0,0),16 }, + { IPv4(149,174,0,0),16 }, + { IPv4(149,199,0,0),16 }, + { IPv4(149,206,0,0),16 }, + { IPv4(149,211,0,0),16 }, + { IPv4(149,214,0,0),16 }, + { IPv4(149,221,0,0),16 }, + { IPv4(149,236,0,0),16 }, + { IPv4(149,239,0,0),16 }, + { IPv4(149,242,212,0),24 }, + { IPv4(149,244,0,0),16 }, + { IPv4(150,18,0,0),16 }, + { IPv4(150,19,0,0),16 }, + { IPv4(150,26,0,0),16 }, + { IPv4(150,29,0,0),16 }, + { IPv4(150,32,0,0),16 }, + { IPv4(150,48,240,0),23 }, + { IPv4(150,52,0,0),16 }, + { IPv4(150,61,0,0),16 }, + { IPv4(150,63,0,0),16 }, + { IPv4(150,65,0,0),16 }, + { IPv4(150,70,32,0),22 }, + { IPv4(150,82,0,0),16 }, + { IPv4(150,91,0,0),16 }, + { IPv4(150,105,0,0),16 }, + { IPv4(150,105,16,0),20 }, + { IPv4(150,105,32,0),20 }, + { IPv4(150,112,0,0),16 }, + { IPv4(150,113,0,0),16 }, + { IPv4(150,114,0,0),16 }, + { IPv4(150,131,0,0),16 }, + { IPv4(150,133,0,0),16 }, + { IPv4(150,135,0,0),16 }, + { IPv4(150,137,0,0),16 }, + { IPv4(150,142,0,0),16 }, + { IPv4(150,143,0,0),16 }, + { IPv4(150,144,0,0),16 }, + { IPv4(150,149,0,0),16 }, + { IPv4(150,150,0,0),16 }, + { IPv4(150,152,0,0),16 }, + { IPv4(150,155,0,0),16 }, + { IPv4(150,156,0,0),16 }, + { IPv4(150,167,0,0),16 }, + { IPv4(150,177,0,0),16 }, + { IPv4(150,180,0,0),16 }, + { IPv4(150,183,0,0),16 }, + { IPv4(150,183,10,0),24 }, + { IPv4(150,183,92,0),24 }, + { IPv4(150,184,0,0),16 }, + { IPv4(150,185,128,0),18 }, + { IPv4(150,190,0,0),16 }, + { IPv4(150,192,0,0),15 }, + { IPv4(150,195,0,0),16 }, + { IPv4(150,197,0,0),16 }, + { IPv4(150,199,0,0),16 }, + { IPv4(150,200,0,0),16 }, + { IPv4(150,201,0,0),16 }, + { IPv4(150,202,0,0),16 }, + { IPv4(150,202,8,0),24 }, + { IPv4(150,209,0,0),16 }, + { IPv4(150,210,0,0),16 }, + { IPv4(150,220,10,0),24 }, + { IPv4(150,225,0,0),16 }, + { IPv4(150,226,0,0),16 }, + { IPv4(150,228,0,0),16 }, + { IPv4(150,231,0,0),16 }, + { IPv4(150,232,0,0),16 }, + { IPv4(150,243,0,0),16 }, + { IPv4(150,250,0,0),16 }, + { IPv4(150,253,0,0),16 }, + { IPv4(151,87,0,0),16 }, + { IPv4(151,96,0,0),16 }, + { IPv4(151,110,206,0),24 }, + { IPv4(151,111,0,0),16 }, + { IPv4(151,113,0,0),16 }, + { IPv4(151,118,0,0),16 }, + { IPv4(151,120,0,0),16 }, + { IPv4(151,124,0,0),16 }, + { IPv4(151,125,0,0),16 }, + { IPv4(151,126,0,0),16 }, + { IPv4(151,140,0,0),16 }, + { IPv4(151,142,218,0),24 }, + { IPv4(151,148,0,0),16 }, + { IPv4(151,153,0,0),16 }, + { IPv4(151,155,0,0),16 }, + { IPv4(151,163,0,0),16 }, + { IPv4(151,163,2,0),24 }, + { 
IPv4(151,163,56,0),24 }, + { IPv4(151,163,57,0),24 }, + { IPv4(151,164,88,0),24 }, + { IPv4(151,164,169,0),24 }, + { IPv4(151,164,170,0),23 }, + { IPv4(151,164,172,0),23 }, + { IPv4(151,164,174,0),24 }, + { IPv4(151,164,230,0),24 }, + { IPv4(151,164,231,0),24 }, + { IPv4(151,166,0,0),16 }, + { IPv4(151,186,0,0),16 }, + { IPv4(151,190,0,0),16 }, + { IPv4(151,193,0,0),16 }, + { IPv4(151,195,0,0),16 }, + { IPv4(151,210,0,0),16 }, + { IPv4(151,212,0,0),16 }, + { IPv4(152,61,0,0),16 }, + { IPv4(152,61,1,0),24 }, + { IPv4(152,67,0,0),16 }, + { IPv4(152,67,13,0),24 }, + { IPv4(152,67,109,0),24 }, + { IPv4(152,67,220,0),22 }, + { IPv4(152,67,224,0),23 }, + { IPv4(152,67,226,0),24 }, + { IPv4(152,76,0,0),16 }, + { IPv4(152,79,0,0),16 }, + { IPv4(152,80,0,0),16 }, + { IPv4(152,85,0,0),16 }, + { IPv4(152,85,2,0),24 }, + { IPv4(152,85,3,0),24 }, + { IPv4(152,86,0,0),16 }, + { IPv4(152,87,0,0),16 }, + { IPv4(152,99,0,0),16 }, + { IPv4(152,99,0,0),17 }, + { IPv4(152,99,128,0),18 }, + { IPv4(152,99,192,0),18 }, + { IPv4(152,104,224,0),19 }, + { IPv4(152,107,0,0),16 }, + { IPv4(152,110,0,0),16 }, + { IPv4(152,111,0,0),16 }, + { IPv4(152,112,0,0),16 }, + { IPv4(152,114,0,0),16 }, + { IPv4(152,131,100,0),22 }, + { IPv4(152,131,104,0),24 }, + { IPv4(152,131,110,0),23 }, + { IPv4(152,131,112,0),23 }, + { IPv4(152,131,114,0),24 }, + { IPv4(152,137,0,0),16 }, + { IPv4(152,149,0,0),16 }, + { IPv4(152,158,0,0),16 }, + { IPv4(152,158,160,0),19 }, + { IPv4(152,158,192,0),18 }, + { IPv4(152,160,0,0),16 }, + { IPv4(152,163,0,0),20 }, + { IPv4(152,163,0,0),16 }, + { IPv4(152,229,0,0),16 }, + { IPv4(153,2,0,0),16 }, + { IPv4(153,2,228,0),24 }, + { IPv4(153,2,229,0),24 }, + { IPv4(153,2,230,0),24 }, + { IPv4(153,2,231,0),24 }, + { IPv4(153,2,234,0),24 }, + { IPv4(153,2,244,0),24 }, + { IPv4(153,2,247,0),24 }, + { IPv4(153,4,0,0),16 }, + { IPv4(153,9,0,0),16 }, + { IPv4(153,10,0,0),16 }, + { IPv4(153,11,0,0),16 }, + { IPv4(153,18,0,0),16 }, + { IPv4(153,20,0,0),16 }, + { IPv4(153,24,0,0),14 }, + { IPv4(153,33,0,0),16 }, + { IPv4(153,45,0,0),16 }, + { IPv4(153,46,0,0),16 }, + { IPv4(153,69,0,0),24 }, + { IPv4(153,69,128,0),24 }, + { IPv4(153,91,0,0),16 }, + { IPv4(153,102,0,0),16 }, + { IPv4(153,103,0,0),16 }, + { IPv4(153,105,0,0),16 }, + { IPv4(154,2,0,0),16 }, + { IPv4(155,5,0,0),16 }, + { IPv4(155,6,0,0),16 }, + { IPv4(155,8,0,0),15 }, + { IPv4(155,14,0,0),16 }, + { IPv4(155,36,0,0),16 }, + { IPv4(155,41,0,0),17 }, + { IPv4(155,48,0,0),16 }, + { IPv4(155,53,0,0),16 }, + { IPv4(155,59,2,0),24 }, + { IPv4(155,60,0,0),16 }, + { IPv4(155,68,0,0),16 }, + { IPv4(155,72,0,0),16 }, + { IPv4(155,72,145,0),24 }, + { IPv4(155,72,147,0),24 }, + { IPv4(155,72,148,0),22 }, + { IPv4(155,91,0,0),16 }, + { IPv4(155,91,2,0),24 }, + { IPv4(155,91,4,0),24 }, + { IPv4(155,91,6,0),24 }, + { IPv4(155,91,8,0),24 }, + { IPv4(155,91,16,0),24 }, + { IPv4(155,91,17,0),24 }, + { IPv4(155,94,0,0),16 }, + { IPv4(155,94,104,0),21 }, + { IPv4(155,95,0,0),16 }, + { IPv4(155,99,0,0),16 }, + { IPv4(155,100,0,0),16 }, + { IPv4(155,101,0,0),16 }, + { IPv4(155,106,0,0),16 }, + { IPv4(155,131,0,0),16 }, + { IPv4(155,131,0,0),19 }, + { IPv4(155,131,96,0),19 }, + { IPv4(155,135,0,0),16 }, + { IPv4(155,136,0,0),16 }, + { IPv4(155,141,0,0),16 }, + { IPv4(155,147,0,0),16 }, + { IPv4(155,147,25,0),24 }, + { IPv4(155,148,0,0),16 }, + { IPv4(155,149,0,0),16 }, + { IPv4(155,150,0,0),15 }, + { IPv4(155,152,0,0),14 }, + { IPv4(155,152,120,0),21 }, + { IPv4(155,159,0,0),16 }, + { IPv4(155,161,0,0),16 }, + { IPv4(155,162,0,0),15 }, + { IPv4(155,164,0,0),14 }, + { 
IPv4(155,168,0,0),15 }, + { IPv4(155,170,0,0),16 }, + { IPv4(155,170,0,0),17 }, + { IPv4(155,173,0,0),16 }, + { IPv4(155,174,0,0),16 }, + { IPv4(155,176,0,0),16 }, + { IPv4(155,177,174,0),24 }, + { IPv4(155,182,104,0),24 }, + { IPv4(155,192,0,0),17 }, + { IPv4(155,192,0,0),16 }, + { IPv4(155,192,160,0),19 }, + { IPv4(155,201,0,0),16 }, + { IPv4(155,201,35,0),24 }, + { IPv4(155,201,36,0),24 }, + { IPv4(155,201,63,0),24 }, + { IPv4(155,201,64,0),18 }, + { IPv4(155,201,128,0),18 }, + { IPv4(155,201,139,0),24 }, + { IPv4(155,201,240,0),24 }, + { IPv4(155,201,241,0),24 }, + { IPv4(155,201,242,0),24 }, + { IPv4(155,201,243,0),24 }, + { IPv4(155,201,244,0),24 }, + { IPv4(155,201,245,0),24 }, + { IPv4(155,201,246,0),24 }, + { IPv4(155,202,0,0),16 }, + { IPv4(155,202,254,0),24 }, + { IPv4(155,208,0,0),16 }, + { IPv4(155,211,0,0),16 }, + { IPv4(155,211,112,0),24 }, + { IPv4(155,211,128,0),24 }, + { IPv4(155,211,251,0),24 }, + { IPv4(155,212,0,0),16 }, + { IPv4(155,213,0,0),16 }, + { IPv4(155,214,0,0),15 }, + { IPv4(155,216,0,0),14 }, + { IPv4(155,218,0,0),16 }, + { IPv4(155,223,0,0),16 }, + { IPv4(155,225,0,0),16 }, + { IPv4(155,226,0,0),16 }, + { IPv4(155,230,0,0),16 }, + { IPv4(155,232,0,0),16 }, + { IPv4(155,234,0,0),16 }, + { IPv4(155,235,0,0),16 }, + { IPv4(155,236,150,0),23 }, + { IPv4(155,236,152,0),24 }, + { IPv4(155,237,0,0),16 }, + { IPv4(155,238,0,0),16 }, + { IPv4(155,239,0,0),16 }, + { IPv4(155,240,0,0),16 }, + { IPv4(155,244,0,0),16 }, + { IPv4(155,250,0,0),16 }, + { IPv4(155,252,0,0),24 }, + { IPv4(155,252,0,0),16 }, + { IPv4(155,252,1,0),24 }, + { IPv4(155,252,2,0),24 }, + { IPv4(155,252,4,0),22 }, + { IPv4(155,252,16,0),20 }, + { IPv4(155,252,64,0),21 }, + { IPv4(155,252,72,0),21 }, + { IPv4(155,252,80,0),21 }, + { IPv4(155,252,88,0),21 }, + { IPv4(155,252,96,0),21 }, + { IPv4(155,252,104,0),22 }, + { IPv4(155,252,112,0),22 }, + { IPv4(155,252,116,0),22 }, + { IPv4(155,252,128,0),21 }, + { IPv4(155,252,140,0),22 }, + { IPv4(155,252,144,0),21 }, + { IPv4(155,252,152,0),22 }, + { IPv4(155,252,158,0),24 }, + { IPv4(155,252,160,0),22 }, + { IPv4(155,252,164,0),23 }, + { IPv4(155,252,192,0),21 }, + { IPv4(155,252,204,0),22 }, + { IPv4(155,252,224,0),20 }, + { IPv4(155,252,240,0),21 }, + { IPv4(155,252,248,0),22 }, + { IPv4(155,252,252,0),24 }, + { IPv4(156,6,0,0),16 }, + { IPv4(156,7,0,0),16 }, + { IPv4(156,8,0,0),16 }, + { IPv4(156,19,0,0),16 }, + { IPv4(156,29,0,0),16 }, + { IPv4(156,42,0,0),16 }, + { IPv4(156,46,25,0),24 }, + { IPv4(156,46,140,0),22 }, + { IPv4(156,55,0,0),16 }, + { IPv4(156,55,126,0),23 }, + { IPv4(156,55,128,0),22 }, + { IPv4(156,55,132,0),22 }, + { IPv4(156,56,0,0),16 }, + { IPv4(156,59,0,0),16 }, + { IPv4(156,61,0,0),16 }, + { IPv4(156,62,0,0),16 }, + { IPv4(156,63,0,0),16 }, + { IPv4(156,68,0,0),16 }, + { IPv4(156,77,0,0),16 }, + { IPv4(156,77,64,0),19 }, + { IPv4(156,79,0,0),16 }, + { IPv4(156,98,0,0),15 }, + { IPv4(156,107,168,0),23 }, + { IPv4(156,111,0,0),16 }, + { IPv4(156,114,200,0),24 }, + { IPv4(156,140,0,0),16 }, + { IPv4(156,145,0,0),16 }, + { IPv4(156,147,0,0),16 }, + { IPv4(156,152,0,0),15 }, + { IPv4(156,152,224,0),24 }, + { IPv4(156,153,37,0),24 }, + { IPv4(157,2,0,0),16 }, + { IPv4(157,22,0,0),16 }, + { IPv4(157,22,112,0),20 }, + { IPv4(157,22,208,0),23 }, + { IPv4(157,22,237,0),24 }, + { IPv4(157,28,0,0),15 }, + { IPv4(157,64,0,0),16 }, + { IPv4(157,66,0,0),16 }, + { IPv4(157,71,0,0),16 }, + { IPv4(157,72,0,0),16 }, + { IPv4(157,73,0,0),16 }, + { IPv4(157,74,0,0),16 }, + { IPv4(157,75,0,0),16 }, + { IPv4(157,77,0,0),16 }, + { IPv4(157,79,0,0),16 
}, + { IPv4(157,84,0,0),16 }, + { IPv4(157,89,0,0),16 }, + { IPv4(157,92,0,0),16 }, + { IPv4(157,100,1,0),24 }, + { IPv4(157,100,2,0),24 }, + { IPv4(157,100,8,0),24 }, + { IPv4(157,100,16,0),24 }, + { IPv4(157,100,21,0),24 }, + { IPv4(157,100,24,0),24 }, + { IPv4(157,100,25,0),24 }, + { IPv4(157,100,27,0),24 }, + { IPv4(157,100,28,0),24 }, + { IPv4(157,100,29,0),24 }, + { IPv4(157,100,33,0),24 }, + { IPv4(157,100,37,0),24 }, + { IPv4(157,100,45,0),24 }, + { IPv4(157,100,46,0),24 }, + { IPv4(157,100,50,0),24 }, + { IPv4(157,100,58,0),24 }, + { IPv4(157,100,59,0),24 }, + { IPv4(157,100,61,0),24 }, + { IPv4(157,100,71,0),24 }, + { IPv4(157,100,72,0),24 }, + { IPv4(157,100,97,0),24 }, + { IPv4(157,100,98,0),24 }, + { IPv4(157,100,100,0),24 }, + { IPv4(157,100,103,0),24 }, + { IPv4(157,100,104,0),24 }, + { IPv4(157,100,111,0),24 }, + { IPv4(157,100,112,0),24 }, + { IPv4(157,100,113,0),24 }, + { IPv4(157,100,125,0),24 }, + { IPv4(157,100,136,0),24 }, + { IPv4(157,100,141,0),24 }, + { IPv4(157,100,144,0),24 }, + { IPv4(157,100,147,0),24 }, + { IPv4(157,100,158,0),24 }, + { IPv4(157,100,165,0),24 }, + { IPv4(157,100,183,0),24 }, + { IPv4(157,100,217,0),24 }, + { IPv4(157,100,251,0),24 }, + { IPv4(157,109,0,0),16 }, + { IPv4(157,111,0,0),16 }, + { IPv4(157,120,0,0),16 }, + { IPv4(157,125,0,0),16 }, + { IPv4(157,127,0,0),16 }, + { IPv4(157,128,0,0),16 }, + { IPv4(157,132,0,0),16 }, + { IPv4(157,139,0,0),16 }, + { IPv4(157,141,0,0),16 }, + { IPv4(157,151,0,0),16 }, + { IPv4(157,154,0,0),16 }, + { IPv4(157,162,0,0),16 }, + { IPv4(157,165,0,0),16 }, + { IPv4(157,176,0,0),16 }, + { IPv4(157,178,0,0),16 }, + { IPv4(157,179,0,0),20 }, + { IPv4(157,179,16,0),24 }, + { IPv4(157,187,0,0),16 }, + { IPv4(157,187,16,0),20 }, + { IPv4(157,187,32,0),20 }, + { IPv4(157,187,48,0),20 }, + { IPv4(157,198,0,0),16 }, + { IPv4(157,199,0,0),16 }, + { IPv4(157,201,0,0),16 }, + { IPv4(157,205,0,0),16 }, + { IPv4(157,205,128,0),17 }, + { IPv4(157,209,0,0),16 }, + { IPv4(157,226,0,0),16 }, + { IPv4(157,229,0,0),16 }, + { IPv4(157,231,16,0),24 }, + { IPv4(157,238,0,0),16 }, + { IPv4(157,252,0,0),16 }, + { IPv4(158,0,0,0),13 }, + { IPv4(158,2,0,0),16 }, + { IPv4(158,8,0,0),14 }, + { IPv4(158,12,0,0),16 }, + { IPv4(158,14,0,0),15 }, + { IPv4(158,16,0,0),14 }, + { IPv4(158,20,0,0),16 }, + { IPv4(158,44,20,0),22 }, + { IPv4(158,44,24,0),23 }, + { IPv4(158,44,26,0),24 }, + { IPv4(158,52,0,0),16 }, + { IPv4(158,54,0,0),16 }, + { IPv4(158,57,0,0),16 }, + { IPv4(158,73,0,0),16 }, + { IPv4(158,81,0,0),17 }, + { IPv4(158,81,128,0),17 }, + { IPv4(158,83,0,0),16 }, + { IPv4(158,91,0,0),16 }, + { IPv4(158,93,0,0),16 }, + { IPv4(158,97,0,0),16 }, + { IPv4(158,100,0,0),16 }, + { IPv4(158,102,0,0),16 }, + { IPv4(158,107,0,0),16 }, + { IPv4(158,107,48,0),22 }, + { IPv4(158,113,0,0),16 }, + { IPv4(158,114,0,0),16 }, + { IPv4(158,116,131,0),24 }, + { IPv4(158,118,10,0),24 }, + { IPv4(158,118,11,0),24 }, + { IPv4(158,122,0,0),16 }, + { IPv4(158,130,0,0),16 }, + { IPv4(158,132,0,0),16 }, + { IPv4(158,135,0,0),16 }, + { IPv4(158,142,0,0),16 }, + { IPv4(158,151,0,0),16 }, + { IPv4(158,152,0,0),16 }, + { IPv4(158,153,0,0),16 }, + { IPv4(158,154,0,0),16 }, + { IPv4(158,157,0,0),16 }, + { IPv4(158,158,0,0),16 }, + { IPv4(158,161,0,0),16 }, + { IPv4(158,171,192,0),24 }, + { IPv4(158,171,193,0),24 }, + { IPv4(158,171,194,0),24 }, + { IPv4(158,171,195,0),24 }, + { IPv4(158,171,210,0),24 }, + { IPv4(158,171,211,0),24 }, + { IPv4(158,201,0,0),16 }, + { IPv4(158,203,0,0),16 }, + { IPv4(158,210,0,0),16 }, + { IPv4(158,222,224,0),20 }, + { 
IPv4(158,236,0,0),14 }, + { IPv4(158,239,0,0),16 }, + { IPv4(158,240,0,0),14 }, + { IPv4(158,240,0,0),16 }, + { IPv4(158,244,0,0),16 }, + { IPv4(158,245,0,0),16 }, + { IPv4(158,247,0,0),16 }, + { IPv4(158,251,0,0),16 }, + { IPv4(159,7,135,0),24 }, + { IPv4(159,12,0,0),16 }, + { IPv4(159,16,0,0),16 }, + { IPv4(159,21,0,0),16 }, + { IPv4(159,33,0,0),16 }, + { IPv4(159,49,0,0),16 }, + { IPv4(159,53,0,0),16 }, + { IPv4(159,62,0,0),16 }, + { IPv4(159,71,0,0),16 }, + { IPv4(159,75,0,0),16 }, + { IPv4(159,77,0,0),16 }, + { IPv4(159,82,0,0),16 }, + { IPv4(159,83,0,0),16 }, + { IPv4(159,99,0,0),16 }, + { IPv4(159,104,6,0),24 }, + { IPv4(159,104,7,0),24 }, + { IPv4(159,108,0,0),16 }, + { IPv4(159,113,0,0),16 }, + { IPv4(159,115,0,0),16 }, + { IPv4(159,115,14,0),24 }, + { IPv4(159,119,0,0),16 }, + { IPv4(159,120,0,0),16 }, + { IPv4(159,124,0,0),16 }, + { IPv4(159,133,0,0),16 }, + { IPv4(159,137,0,0),16 }, + { IPv4(159,140,0,0),16 }, + { IPv4(159,140,174,0),24 }, + { IPv4(159,140,213,0),24 }, + { IPv4(159,140,214,0),24 }, + { IPv4(159,140,218,0),24 }, + { IPv4(159,140,219,0),24 }, + { IPv4(159,140,244,0),24 }, + { IPv4(159,140,254,0),24 }, + { IPv4(159,143,0,0),16 }, + { IPv4(159,153,0,0),17 }, + { IPv4(159,153,0,0),16 }, + { IPv4(159,153,128,0),19 }, + { IPv4(159,153,160,0),21 }, + { IPv4(159,153,192,0),19 }, + { IPv4(159,153,224,0),19 }, + { IPv4(159,157,16,0),24 }, + { IPv4(159,157,254,0),24 }, + { IPv4(159,182,0,0),16 }, + { IPv4(159,189,0,0),16 }, + { IPv4(159,199,0,0),16 }, + { IPv4(159,204,0,0),16 }, + { IPv4(159,212,0,0),16 }, + { IPv4(159,221,0,0),16 }, + { IPv4(159,223,0,0),16 }, + { IPv4(159,226,0,0),16 }, + { IPv4(159,240,0,0),16 }, + { IPv4(159,247,0,0),16 }, + { IPv4(159,251,0,0),16 }, + { IPv4(160,7,0,0),16 }, + { IPv4(160,10,0,0),16 }, + { IPv4(160,23,0,0),16 }, + { IPv4(160,33,0,0),19 }, + { IPv4(160,33,0,0),16 }, + { IPv4(160,39,0,0),16 }, + { IPv4(160,41,0,0),16 }, + { IPv4(160,42,0,0),16 }, + { IPv4(160,43,0,0),16 }, + { IPv4(160,54,0,0),15 }, + { IPv4(160,56,0,0),15 }, + { IPv4(160,58,0,0),16 }, + { IPv4(160,69,0,0),23 }, + { IPv4(160,79,0,0),16 }, + { IPv4(160,79,80,0),24 }, + { IPv4(160,79,190,0),23 }, + { IPv4(160,79,198,0),23 }, + { IPv4(160,79,214,0),23 }, + { IPv4(160,79,216,0),23 }, + { IPv4(160,79,224,0),22 }, + { IPv4(160,79,240,0),22 }, + { IPv4(160,79,248,0),22 }, + { IPv4(160,83,32,0),19 }, + { IPv4(160,87,0,0),16 }, + { IPv4(160,91,0,0),16 }, + { IPv4(160,93,0,0),16 }, + { IPv4(160,94,0,0),16 }, + { IPv4(160,96,0,0),16 }, + { IPv4(160,115,0,0),16 }, + { IPv4(160,118,0,0),16 }, + { IPv4(160,123,0,0),16 }, + { IPv4(160,125,0,0),16 }, + { IPv4(160,126,0,0),15 }, + { IPv4(160,128,0,0),18 }, + { IPv4(160,129,0,0),16 }, + { IPv4(160,132,0,0),15 }, + { IPv4(160,134,0,0),16 }, + { IPv4(160,135,0,0),16 }, + { IPv4(160,136,0,0),13 }, + { IPv4(160,144,0,0),13 }, + { IPv4(160,147,0,0),16 }, + { IPv4(160,189,0,0),16 }, + { IPv4(160,190,0,0),16 }, + { IPv4(160,192,0,0),16 }, + { IPv4(160,194,0,0),16 }, + { IPv4(160,199,0,0),16 }, + { IPv4(160,201,0,0),16 }, + { IPv4(160,202,0,0),16 }, + { IPv4(160,205,0,0),16 }, + { IPv4(160,206,0,0),16 }, + { IPv4(160,207,0,0),16 }, + { IPv4(160,211,0,0),16 }, + { IPv4(160,212,0,0),16 }, + { IPv4(160,219,0,0),16 }, + { IPv4(160,221,0,0),16 }, + { IPv4(160,227,0,0),16 }, + { IPv4(160,231,0,0),16 }, + { IPv4(160,231,1,0),24 }, + { IPv4(160,239,0,0),16 }, + { IPv4(160,239,1,0),24 }, + { IPv4(160,240,0,0),16 }, + { IPv4(160,243,0,0),16 }, + { IPv4(160,248,0,0),16 }, + { IPv4(160,254,0,0),16 }, + { IPv4(160,254,107,0),24 }, + { IPv4(160,254,115,0),24 
}, + { IPv4(160,254,123,0),24 }, + { IPv4(161,1,0,0),17 }, + { IPv4(161,1,0,0),16 }, + { IPv4(161,2,0,0),16 }, + { IPv4(161,13,0,0),16 }, + { IPv4(161,16,0,0),16 }, + { IPv4(161,21,0,0),18 }, + { IPv4(161,21,20,0),23 }, + { IPv4(161,21,22,0),23 }, + { IPv4(161,21,24,0),23 }, + { IPv4(161,21,26,0),23 }, + { IPv4(161,21,28,0),23 }, + { IPv4(161,21,30,0),23 }, + { IPv4(161,21,32,0),23 }, + { IPv4(161,21,34,0),23 }, + { IPv4(161,21,36,0),23 }, + { IPv4(161,21,38,0),23 }, + { IPv4(161,21,40,0),23 }, + { IPv4(161,21,42,0),23 }, + { IPv4(161,21,44,0),23 }, + { IPv4(161,21,46,0),23 }, + { IPv4(161,21,48,0),23 }, + { IPv4(161,21,50,0),23 }, + { IPv4(161,21,52,0),23 }, + { IPv4(161,21,54,0),23 }, + { IPv4(161,21,56,0),23 }, + { IPv4(161,21,58,0),23 }, + { IPv4(161,21,60,0),23 }, + { IPv4(161,21,62,0),23 }, + { IPv4(161,21,64,0),19 }, + { IPv4(161,21,64,0),23 }, + { IPv4(161,21,66,0),23 }, + { IPv4(161,21,68,0),23 }, + { IPv4(161,21,70,0),23 }, + { IPv4(161,21,72,0),23 }, + { IPv4(161,21,74,0),23 }, + { IPv4(161,21,76,0),23 }, + { IPv4(161,21,78,0),23 }, + { IPv4(161,21,80,0),23 }, + { IPv4(161,21,82,0),23 }, + { IPv4(161,21,84,0),23 }, + { IPv4(161,21,86,0),23 }, + { IPv4(161,21,88,0),23 }, + { IPv4(161,28,0,0),16 }, + { IPv4(161,33,0,0),16 }, + { IPv4(161,33,3,0),24 }, + { IPv4(161,38,0,0),16 }, + { IPv4(161,40,0,0),16 }, + { IPv4(161,44,0,0),16 }, + { IPv4(161,46,0,0),16 }, + { IPv4(161,51,224,0),20 }, + { IPv4(161,58,0,0),16 }, + { IPv4(161,65,0,0),16 }, + { IPv4(161,69,0,0),16 }, + { IPv4(161,69,211,0),24 }, + { IPv4(161,69,212,0),24 }, + { IPv4(161,69,213,0),24 }, + { IPv4(161,71,171,0),24 }, + { IPv4(161,81,0,0),16 }, + { IPv4(161,97,0,0),16 }, + { IPv4(161,98,0,0),16 }, + { IPv4(161,98,128,0),17 }, + { IPv4(161,114,180,0),24 }, + { IPv4(161,114,188,0),24 }, + { IPv4(161,114,189,0),24 }, + { IPv4(161,114,192,0),20 }, + { IPv4(161,119,0,0),16 }, + { IPv4(161,122,0,0),16 }, + { IPv4(161,124,0,0),16 }, + { IPv4(161,130,0,0),16 }, + { IPv4(161,132,232,0),21 }, + { IPv4(161,132,240,0),21 }, + { IPv4(161,134,0,0),16 }, + { IPv4(161,135,0,0),16 }, + { IPv4(161,136,0,0),16 }, + { IPv4(161,137,0,0),16 }, + { IPv4(161,139,0,0),16 }, + { IPv4(161,142,0,0),17 }, + { IPv4(161,142,0,0),16 }, + { IPv4(161,142,128,0),17 }, + { IPv4(161,149,0,0),16 }, + { IPv4(161,150,0,0),17 }, + { IPv4(161,150,128,0),18 }, + { IPv4(161,150,192,0),18 }, + { IPv4(161,155,0,0),16 }, + { IPv4(161,159,0,0),16 }, + { IPv4(161,160,0,0),16 }, + { IPv4(161,161,0,0),16 }, + { IPv4(161,165,0,0),16 }, + { IPv4(161,173,0,0),16 }, + { IPv4(161,173,11,0),24 }, + { IPv4(161,180,0,0),16 }, + { IPv4(161,181,246,0),24 }, + { IPv4(161,185,0,0),16 }, + { IPv4(161,186,0,0),16 }, + { IPv4(161,195,0,0),16 }, + { IPv4(161,207,0,0),16 }, + { IPv4(161,210,0,0),16 }, + { IPv4(161,213,0,0),16 }, + { IPv4(161,217,0,0),16 }, + { IPv4(161,222,0,0),16 }, + { IPv4(161,223,0,0),16 }, + { IPv4(161,223,0,0),21 }, + { IPv4(161,223,8,0),21 }, + { IPv4(161,223,16,0),20 }, + { IPv4(161,223,32,0),19 }, + { IPv4(161,223,64,0),18 }, + { IPv4(161,223,128,0),18 }, + { IPv4(161,223,192,0),20 }, + { IPv4(161,223,208,0),21 }, + { IPv4(161,223,216,0),22 }, + { IPv4(161,223,225,0),24 }, + { IPv4(161,223,226,0),23 }, + { IPv4(161,223,228,0),22 }, + { IPv4(161,223,232,0),21 }, + { IPv4(161,223,240,0),20 }, + { IPv4(161,224,0,0),16 }, + { IPv4(161,225,0,0),16 }, + { IPv4(161,229,0,0),16 }, + { IPv4(161,230,0,0),16 }, + { IPv4(161,232,0,0),16 }, + { IPv4(161,233,0,0),16 }, + { IPv4(161,242,0,0),16 }, + { IPv4(161,242,208,0),20 }, + { IPv4(161,243,0,0),16 }, + { 
IPv4(161,246,0,0),16 }, + { IPv4(161,253,0,0),16 }, + { IPv4(162,5,0,0),16 }, + { IPv4(162,5,128,0),17 }, + { IPv4(162,8,0,0),16 }, + { IPv4(162,8,230,0),24 }, + { IPv4(162,8,231,0),24 }, + { IPv4(162,8,232,0),24 }, + { IPv4(162,8,233,0),24 }, + { IPv4(162,10,0,0),16 }, + { IPv4(162,13,0,0),16 }, + { IPv4(162,13,32,0),20 }, + { IPv4(162,24,0,0),16 }, + { IPv4(162,27,0,0),16 }, + { IPv4(162,33,1,0),24 }, + { IPv4(162,33,96,0),21 }, + { IPv4(162,33,96,0),19 }, + { IPv4(162,33,104,0),21 }, + { IPv4(162,33,112,0),21 }, + { IPv4(162,33,120,0),21 }, + { IPv4(162,33,163,0),24 }, + { IPv4(162,36,0,0),16 }, + { IPv4(162,40,107,0),24 }, + { IPv4(162,48,0,0),16 }, + { IPv4(162,51,0,0),16 }, + { IPv4(162,57,0,0),16 }, + { IPv4(162,69,0,0),16 }, + { IPv4(162,88,0,0),16 }, + { IPv4(162,93,0,0),16 }, + { IPv4(162,93,64,0),19 }, + { IPv4(162,93,160,0),19 }, + { IPv4(162,93,192,0),19 }, + { IPv4(162,93,224,0),19 }, + { IPv4(162,94,0,0),16 }, + { IPv4(162,96,0,0),16 }, + { IPv4(162,116,0,0),16 }, + { IPv4(162,123,0,0),16 }, + { IPv4(162,126,205,0),24 }, + { IPv4(162,126,206,0),24 }, + { IPv4(162,126,207,0),24 }, + { IPv4(162,126,208,0),24 }, + { IPv4(162,129,0,0),16 }, + { IPv4(162,130,0,0),16 }, + { IPv4(162,136,0,0),16 }, + { IPv4(162,136,40,0),22 }, + { IPv4(163,2,0,0),16 }, + { IPv4(163,6,0,0),16 }, + { IPv4(163,7,0,0),16 }, + { IPv4(163,10,0,0),16 }, + { IPv4(163,12,0,0),16 }, + { IPv4(163,13,0,0),16 }, + { IPv4(163,14,0,0),15 }, + { IPv4(163,16,0,0),15 }, + { IPv4(163,17,32,0),22 }, + { IPv4(163,17,36,0),23 }, + { IPv4(163,17,40,0),21 }, + { IPv4(163,17,48,0),20 }, + { IPv4(163,17,88,0),21 }, + { IPv4(163,17,96,0),22 }, + { IPv4(163,17,108,0),22 }, + { IPv4(163,17,112,0),20 }, + { IPv4(163,17,128,0),23 }, + { IPv4(163,17,130,0),24 }, + { IPv4(163,17,146,0),23 }, + { IPv4(163,17,148,0),22 }, + { IPv4(163,17,152,0),23 }, + { IPv4(163,17,154,0),24 }, + { IPv4(163,17,156,0),22 }, + { IPv4(163,17,160,0),21 }, + { IPv4(163,17,169,0),24 }, + { IPv4(163,17,170,0),23 }, + { IPv4(163,17,172,0),22 }, + { IPv4(163,17,176,0),22 }, + { IPv4(163,17,180,0),23 }, + { IPv4(163,17,182,0),24 }, + { IPv4(163,17,184,0),21 }, + { IPv4(163,17,192,0),20 }, + { IPv4(163,17,208,0),20 }, + { IPv4(163,17,224,0),20 }, + { IPv4(163,17,240,0),24 }, + { IPv4(163,18,0,0),16 }, + { IPv4(163,19,0,0),16 }, + { IPv4(163,20,0,0),14 }, + { IPv4(163,24,0,0),14 }, + { IPv4(163,28,0,0),16 }, + { IPv4(163,28,8,0),21 }, + { IPv4(163,29,0,0),16 }, + { IPv4(163,30,0,0),16 }, + { IPv4(163,31,0,0),16 }, + { IPv4(163,42,0,0),16 }, + { IPv4(163,49,144,0),22 }, + { IPv4(163,126,0,0),16 }, + { IPv4(163,129,0,0),16 }, + { IPv4(163,139,0,0),16 }, + { IPv4(163,142,0,0),16 }, + { IPv4(163,149,0,0),16 }, + { IPv4(163,152,0,0),16 }, + { IPv4(163,152,151,0),24 }, + { IPv4(163,152,152,0),24 }, + { IPv4(163,152,153,0),24 }, + { IPv4(163,152,154,0),24 }, + { IPv4(163,152,161,0),24 }, + { IPv4(163,152,162,0),24 }, + { IPv4(163,152,163,0),24 }, + { IPv4(163,152,164,0),24 }, + { IPv4(163,152,171,0),24 }, + { IPv4(163,152,172,0),24 }, + { IPv4(163,152,173,0),24 }, + { IPv4(163,152,174,0),24 }, + { IPv4(163,153,0,0),16 }, + { IPv4(163,153,238,0),24 }, + { IPv4(163,156,0,0),16 }, + { IPv4(163,157,0,0),16 }, + { IPv4(163,164,0,0),16 }, + { IPv4(163,166,0,0),16 }, + { IPv4(163,175,0,0),16 }, + { IPv4(163,179,0,0),16 }, + { IPv4(163,179,38,0),24 }, + { IPv4(163,179,107,0),24 }, + { IPv4(163,179,161,0),24 }, + { IPv4(163,180,0,0),18 }, + { IPv4(163,180,0,0),17 }, + { IPv4(163,180,64,0),19 }, + { IPv4(163,180,96,0),19 }, + { IPv4(163,180,128,0),18 }, + { 
IPv4(163,180,128,0),19 }, + { IPv4(163,180,160,0),21 }, + { IPv4(163,180,168,0),23 }, + { IPv4(163,180,170,0),24 }, + { IPv4(163,191,0,0),16 }, + { IPv4(163,191,0,0),19 }, + { IPv4(163,191,96,0),19 }, + { IPv4(163,191,128,0),19 }, + { IPv4(163,191,192,0),19 }, + { IPv4(163,191,224,0),19 }, + { IPv4(163,196,0,0),16 }, + { IPv4(163,197,0,0),16 }, + { IPv4(163,198,0,0),15 }, + { IPv4(163,199,132,0),24 }, + { IPv4(163,200,0,0),16 }, + { IPv4(163,201,0,0),16 }, + { IPv4(163,202,0,0),15 }, + { IPv4(163,205,0,0),16 }, + { IPv4(163,206,0,0),16 }, + { IPv4(163,207,0,0),16 }, + { IPv4(163,215,0,0),16 }, + { IPv4(163,220,0,0),16 }, + { IPv4(163,221,0,0),16 }, + { IPv4(163,224,0,0),16 }, + { IPv4(163,228,0,0),16 }, + { IPv4(163,230,0,0),16 }, + { IPv4(163,231,0,0),16 }, + { IPv4(163,234,0,0),16 }, + { IPv4(163,238,0,0),16 }, + { IPv4(163,239,0,0),17 }, + { IPv4(163,239,128,0),18 }, + { IPv4(163,239,251,0),24 }, + { IPv4(163,244,0,0),16 }, + { IPv4(163,247,0,0),16 }, + { IPv4(163,247,40,0),24 }, + { IPv4(163,247,41,0),24 }, + { IPv4(163,247,42,0),24 }, + { IPv4(163,247,43,0),24 }, + { IPv4(163,247,44,0),24 }, + { IPv4(163,247,46,0),24 }, + { IPv4(163,247,47,0),24 }, + { IPv4(163,247,48,0),24 }, + { IPv4(163,247,49,0),24 }, + { IPv4(163,247,50,0),24 }, + { IPv4(163,247,51,0),24 }, + { IPv4(163,247,52,0),24 }, + { IPv4(163,247,53,0),24 }, + { IPv4(163,247,54,0),24 }, + { IPv4(163,247,55,0),24 }, + { IPv4(163,247,56,0),24 }, + { IPv4(163,247,57,0),24 }, + { IPv4(163,247,58,0),24 }, + { IPv4(163,247,59,0),24 }, + { IPv4(163,247,60,0),24 }, + { IPv4(163,247,61,0),24 }, + { IPv4(163,247,62,0),24 }, + { IPv4(163,247,63,0),24 }, + { IPv4(163,247,64,0),24 }, + { IPv4(163,247,65,0),24 }, + { IPv4(163,247,69,0),24 }, + { IPv4(163,247,70,0),24 }, + { IPv4(163,247,71,0),24 }, + { IPv4(163,247,72,0),24 }, + { IPv4(163,248,0,0),16 }, + { IPv4(163,249,0,0),16 }, + { IPv4(163,249,43,0),24 }, + { IPv4(163,249,53,0),24 }, + { IPv4(163,249,54,0),24 }, + { IPv4(163,249,57,0),24 }, + { IPv4(163,249,140,0),22 }, + { IPv4(163,249,160,0),21 }, + { IPv4(163,249,168,0),23 }, + { IPv4(163,249,170,0),24 }, + { IPv4(163,251,0,0),19 }, + { IPv4(163,251,32,0),22 }, + { IPv4(163,251,36,0),22 }, + { IPv4(163,251,40,0),22 }, + { IPv4(163,251,44,0),22 }, + { IPv4(163,251,48,0),22 }, + { IPv4(163,251,52,0),22 }, + { IPv4(163,251,64,0),19 }, + { IPv4(163,251,96,0),22 }, + { IPv4(163,251,224,0),19 }, + { IPv4(163,251,226,0),24 }, + { IPv4(163,251,228,0),24 }, + { IPv4(163,251,229,0),24 }, + { IPv4(163,251,240,0),21 }, + { IPv4(163,251,250,0),24 }, + { IPv4(163,251,251,0),24 }, + { IPv4(163,251,252,0),24 }, + { IPv4(163,251,254,0),24 }, + { IPv4(164,38,0,0),16 }, + { IPv4(164,39,185,0),24 }, + { IPv4(164,43,0,0),16 }, + { IPv4(164,47,0,0),16 }, + { IPv4(164,48,199,0),24 }, + { IPv4(164,49,0,0),16 }, + { IPv4(164,50,0,0),16 }, + { IPv4(164,54,0,0),16 }, + { IPv4(164,55,0,0),16 }, + { IPv4(164,57,0,0),16 }, + { IPv4(164,65,0,0),16 }, + { IPv4(164,66,0,0),16 }, + { IPv4(164,67,0,0),16 }, + { IPv4(164,68,0,0),16 }, + { IPv4(164,78,0,0),16 }, + { IPv4(164,79,0,0),16 }, + { IPv4(164,83,0,0),16 }, + { IPv4(164,87,0,0),16 }, + { IPv4(164,88,0,0),16 }, + { IPv4(164,92,0,0),16 }, + { IPv4(164,92,24,0),24 }, + { IPv4(164,92,144,0),24 }, + { IPv4(164,92,146,0),24 }, + { IPv4(164,92,155,0),24 }, + { IPv4(164,94,0,0),16 }, + { IPv4(164,99,0,0),16 }, + { IPv4(164,103,0,0),16 }, + { IPv4(164,104,0,0),16 }, + { IPv4(164,105,168,0),24 }, + { IPv4(164,113,0,0),19 }, + { IPv4(164,113,32,0),19 }, + { IPv4(164,113,240,0),21 }, + { IPv4(164,117,0,0),16 }, + { 
IPv4(164,121,0,0),16 }, + { IPv4(164,122,0,0),16 }, + { IPv4(164,124,0,0),16 }, + { IPv4(164,125,0,0),16 }, + { IPv4(164,140,0,0),16 }, + { IPv4(164,143,248,0),24 }, + { IPv4(164,143,249,0),24 }, + { IPv4(164,143,250,0),24 }, + { IPv4(164,143,251,0),24 }, + { IPv4(164,145,0,0),16 }, + { IPv4(164,155,0,0),16 }, + { IPv4(164,158,0,0),16 }, + { IPv4(164,164,0,0),16 }, + { IPv4(164,164,42,0),24 }, + { IPv4(164,164,45,0),24 }, + { IPv4(164,164,46,0),24 }, + { IPv4(164,164,48,0),24 }, + { IPv4(164,164,97,0),24 }, + { IPv4(164,167,0,0),16 }, + { IPv4(164,171,0,0),16 }, + { IPv4(164,178,0,0),16 }, + { IPv4(164,190,0,0),16 }, + { IPv4(164,191,0,0),16 }, + { IPv4(164,216,0,0),16 }, + { IPv4(164,218,0,0),16 }, + { IPv4(164,220,0,0),16 }, + { IPv4(164,221,0,0),18 }, + { IPv4(164,221,0,0),16 }, + { IPv4(164,221,64,0),19 }, + { IPv4(164,221,184,0),21 }, + { IPv4(164,221,192,0),21 }, + { IPv4(164,221,200,0),21 }, + { IPv4(164,221,208,0),20 }, + { IPv4(164,221,224,0),19 }, + { IPv4(164,223,0,0),16 }, + { IPv4(164,224,0,0),16 }, + { IPv4(164,226,0,0),16 }, + { IPv4(164,227,0,0),16 }, + { IPv4(164,230,0,0),15 }, + { IPv4(164,230,0,0),16 }, + { IPv4(164,231,72,0),24 }, + { IPv4(165,1,0,0),16 }, + { IPv4(165,4,0,0),16 }, + { IPv4(165,8,0,0),16 }, + { IPv4(165,10,0,0),16 }, + { IPv4(165,11,0,0),16 }, + { IPv4(165,21,0,0),16 }, + { IPv4(165,21,24,0),21 }, + { IPv4(165,21,112,0),21 }, + { IPv4(165,21,124,0),22 }, + { IPv4(165,21,128,0),22 }, + { IPv4(165,21,132,0),24 }, + { IPv4(165,21,134,0),24 }, + { IPv4(165,24,0,0),16 }, + { IPv4(165,25,0,0),16 }, + { IPv4(165,26,0,0),16 }, + { IPv4(165,28,0,0),16 }, + { IPv4(165,30,0,0),16 }, + { IPv4(165,64,0,0),16 }, + { IPv4(165,65,0,0),16 }, + { IPv4(165,72,32,0),19 }, + { IPv4(165,76,0,0),16 }, + { IPv4(165,83,0,0),16 }, + { IPv4(165,87,0,0),16 }, + { IPv4(165,87,15,0),24 }, + { IPv4(165,87,17,0),24 }, + { IPv4(165,87,44,0),23 }, + { IPv4(165,87,49,0),24 }, + { IPv4(165,87,56,0),24 }, + { IPv4(165,87,108,0),22 }, + { IPv4(165,87,112,0),21 }, + { IPv4(165,87,113,0),24 }, + { IPv4(165,87,114,0),24 }, + { IPv4(165,87,173,0),24 }, + { IPv4(165,87,177,0),24 }, + { IPv4(165,89,0,0),16 }, + { IPv4(165,97,0,0),16 }, + { IPv4(165,98,4,0),24 }, + { IPv4(165,98,8,0),24 }, + { IPv4(165,98,11,0),24 }, + { IPv4(165,98,12,0),24 }, + { IPv4(165,98,101,0),24 }, + { IPv4(165,98,102,0),24 }, + { IPv4(165,98,103,0),24 }, + { IPv4(165,98,104,0),22 }, + { IPv4(165,113,0,0),16 }, + { IPv4(165,113,127,0),24 }, + { IPv4(165,113,128,0),24 }, + { IPv4(165,113,129,0),24 }, + { IPv4(165,113,156,0),24 }, + { IPv4(165,113,161,0),24 }, + { IPv4(165,113,176,0),24 }, + { IPv4(165,113,187,0),24 }, + { IPv4(165,113,189,0),24 }, + { IPv4(165,113,190,0),24 }, + { IPv4(165,113,191,0),24 }, + { IPv4(165,113,193,0),24 }, + { IPv4(165,113,208,0),24 }, + { IPv4(165,113,239,0),24 }, + { IPv4(165,121,0,0),16 }, + { IPv4(165,121,96,0),20 }, + { IPv4(165,123,0,0),16 }, + { IPv4(165,124,0,0),16 }, + { IPv4(165,125,32,0),20 }, + { IPv4(165,127,0,0),16 }, + { IPv4(165,130,0,0),16 }, + { IPv4(165,132,0,0),16 }, + { IPv4(165,132,224,0),22 }, + { IPv4(165,132,228,0),22 }, + { IPv4(165,132,232,0),21 }, + { IPv4(165,132,240,0),22 }, + { IPv4(165,132,244,0),23 }, + { IPv4(165,132,246,0),23 }, + { IPv4(165,133,0,0),17 }, + { IPv4(165,133,40,0),24 }, + { IPv4(165,133,128,0),17 }, + { IPv4(165,137,0,0),16 }, + { IPv4(165,141,0,0),16 }, + { IPv4(165,141,0,0),22 }, + { IPv4(165,141,4,0),23 }, + { IPv4(165,141,6,0),24 }, + { IPv4(165,141,8,0),21 }, + { IPv4(165,141,16,0),20 }, + { IPv4(165,141,96,0),20 }, + { 
IPv4(165,141,112,0),20 }, + { IPv4(165,141,128,0),19 }, + { IPv4(165,141,160,0),20 }, + { IPv4(165,141,184,0),22 }, + { IPv4(165,141,200,0),22 }, + { IPv4(165,141,208,0),21 }, + { IPv4(165,141,216,0),22 }, + { IPv4(165,141,220,0),22 }, + { IPv4(165,141,222,0),23 }, + { IPv4(165,141,224,0),22 }, + { IPv4(165,141,228,0),23 }, + { IPv4(165,141,238,0),23 }, + { IPv4(165,141,240,0),23 }, + { IPv4(165,141,244,0),24 }, + { IPv4(165,141,249,0),24 }, + { IPv4(165,150,0,0),16 }, + { IPv4(165,152,0,0),16 }, + { IPv4(165,155,0,0),16 }, + { IPv4(165,166,0,0),16 }, + { IPv4(165,170,24,0),21 }, + { IPv4(165,170,64,0),24 }, + { IPv4(165,170,128,0),24 }, + { IPv4(165,170,176,0),20 }, + { IPv4(165,170,208,0),24 }, + { IPv4(165,173,0,0),16 }, + { IPv4(165,186,0,0),16 }, + { IPv4(165,190,122,0),23 }, + { IPv4(165,190,124,0),22 }, + { IPv4(165,190,128,0),21 }, + { IPv4(165,193,0,0),16 }, + { IPv4(165,194,0,0),16 }, + { IPv4(165,194,128,0),17 }, + { IPv4(165,196,0,0),16 }, + { IPv4(165,201,0,0),16 }, + { IPv4(165,206,0,0),16 }, + { IPv4(165,206,238,0),24 }, + { IPv4(165,212,0,0),18 }, + { IPv4(165,212,0,0),16 }, + { IPv4(165,212,63,0),24 }, + { IPv4(165,217,0,0),16 }, + { IPv4(165,225,194,0),24 }, + { IPv4(165,229,0,0),16 }, + { IPv4(165,230,0,0),16 }, + { IPv4(165,232,0,0),16 }, + { IPv4(165,233,0,0),16 }, + { IPv4(165,236,0,0),16 }, + { IPv4(165,238,0,0),16 }, + { IPv4(165,243,0,0),16 }, + { IPv4(165,244,0,0),16 }, + { IPv4(165,246,0,0),16 }, + { IPv4(165,247,0,0),16 }, + { IPv4(165,247,120,0),21 }, + { IPv4(165,247,196,0),22 }, + { IPv4(165,247,200,0),21 }, + { IPv4(165,247,208,0),20 }, + { IPv4(165,247,224,0),22 }, + { IPv4(165,247,248,0),21 }, + { IPv4(165,251,0,0),16 }, + { IPv4(165,251,24,0),22 }, + { IPv4(165,251,28,0),22 }, + { IPv4(165,251,32,0),22 }, + { IPv4(165,251,36,0),22 }, + { IPv4(165,251,252,0),22 }, + { IPv4(165,252,93,0),24 }, + { IPv4(165,254,0,0),16 }, + { IPv4(165,254,85,0),24 }, + { IPv4(166,16,0,0),16 }, + { IPv4(166,19,0,0),16 }, + { IPv4(166,20,0,0),16 }, + { IPv4(166,21,0,0),16 }, + { IPv4(166,30,0,0),16 }, + { IPv4(166,49,128,0),17 }, + { IPv4(166,49,128,0),23 }, + { IPv4(166,49,130,0),24 }, + { IPv4(166,49,131,0),24 }, + { IPv4(166,49,132,0),24 }, + { IPv4(166,49,133,0),24 }, + { IPv4(166,49,134,0),24 }, + { IPv4(166,49,137,0),24 }, + { IPv4(166,49,138,0),24 }, + { IPv4(166,49,139,0),24 }, + { IPv4(166,49,144,0),24 }, + { IPv4(166,49,149,0),24 }, + { IPv4(166,49,156,0),24 }, + { IPv4(166,49,172,0),22 }, + { IPv4(166,49,180,0),23 }, + { IPv4(166,49,182,0),23 }, + { IPv4(166,49,184,0),23 }, + { IPv4(166,49,186,0),23 }, + { IPv4(166,49,190,0),23 }, + { IPv4(166,49,192,0),22 }, + { IPv4(166,49,224,0),19 }, + { IPv4(166,70,0,0),16 }, + { IPv4(166,72,0,0),16 }, + { IPv4(166,72,88,0),24 }, + { IPv4(166,72,96,0),24 }, + { IPv4(166,72,121,0),24 }, + { IPv4(166,72,122,0),24 }, + { IPv4(166,72,126,0),24 }, + { IPv4(166,72,149,0),24 }, + { IPv4(166,72,151,0),24 }, + { IPv4(166,72,158,0),24 }, + { IPv4(166,72,159,0),24 }, + { IPv4(166,72,162,0),24 }, + { IPv4(166,72,169,0),24 }, + { IPv4(166,72,173,0),24 }, + { IPv4(166,72,181,0),24 }, + { IPv4(166,72,202,0),24 }, + { IPv4(166,72,203,0),24 }, + { IPv4(166,72,208,0),24 }, + { IPv4(166,72,214,0),24 }, + { IPv4(166,72,220,0),24 }, + { IPv4(166,72,233,0),24 }, + { IPv4(166,72,249,0),24 }, + { IPv4(166,73,0,0),16 }, + { IPv4(166,73,20,0),24 }, + { IPv4(166,77,0,0),16 }, + { IPv4(166,80,8,0),24 }, + { IPv4(166,80,9,0),24 }, + { IPv4(166,80,10,0),24 }, + { IPv4(166,80,16,0),24 }, + { IPv4(166,80,27,0),24 }, + { IPv4(166,80,28,0),23 }, + { 
IPv4(166,80,30,0),24 }, + { IPv4(166,80,46,0),24 }, + { IPv4(166,80,50,0),24 }, + { IPv4(166,80,54,0),24 }, + { IPv4(166,80,62,0),24 }, + { IPv4(166,80,74,0),24 }, + { IPv4(166,80,78,0),24 }, + { IPv4(166,80,79,0),24 }, + { IPv4(166,80,82,0),24 }, + { IPv4(166,80,83,0),24 }, + { IPv4(166,80,84,0),24 }, + { IPv4(166,80,90,0),24 }, + { IPv4(166,80,106,0),24 }, + { IPv4(166,80,114,0),24 }, + { IPv4(166,80,120,0),24 }, + { IPv4(166,80,126,0),24 }, + { IPv4(166,80,127,0),24 }, + { IPv4(166,80,242,0),24 }, + { IPv4(166,84,0,0),16 }, + { IPv4(166,84,56,0),21 }, + { IPv4(166,84,56,0),22 }, + { IPv4(166,84,60,0),22 }, + { IPv4(166,84,140,0),23 }, + { IPv4(166,84,143,0),24 }, + { IPv4(166,84,144,0),20 }, + { IPv4(166,84,150,0),24 }, + { IPv4(166,84,157,0),24 }, + { IPv4(166,84,168,0),22 }, + { IPv4(166,84,172,0),24 }, + { IPv4(166,84,174,0),24 }, + { IPv4(166,84,185,0),24 }, + { IPv4(166,84,191,0),24 }, + { IPv4(166,88,0,0),16 }, + { IPv4(166,88,88,0),24 }, + { IPv4(166,89,0,0),16 }, + { IPv4(166,90,14,0),24 }, + { IPv4(166,104,0,0),17 }, + { IPv4(166,104,128,0),19 }, + { IPv4(166,104,160,0),20 }, + { IPv4(166,104,176,0),21 }, + { IPv4(166,104,184,0),21 }, + { IPv4(166,104,192,0),18 }, + { IPv4(166,113,0,0),16 }, + { IPv4(166,114,128,0),19 }, + { IPv4(166,114,248,0),21 }, + { IPv4(166,119,0,0),16 }, + { IPv4(166,121,0,0),16 }, + { IPv4(166,124,0,0),16 }, + { IPv4(166,126,0,0),16 }, + { IPv4(166,127,0,0),16 }, + { IPv4(166,128,0,0),16 }, + { IPv4(166,128,0,0),13 }, + { IPv4(166,129,0,0),16 }, + { IPv4(166,130,0,0),16 }, + { IPv4(166,131,0,0),16 }, + { IPv4(166,132,0,0),16 }, + { IPv4(166,133,0,0),16 }, + { IPv4(166,134,0,0),16 }, + { IPv4(166,137,0,0),16 }, + { IPv4(166,147,64,0),18 }, + { IPv4(166,150,0,0),18 }, + { IPv4(166,150,128,0),18 }, + { IPv4(166,164,0,0),16 }, + { IPv4(166,177,0,0),16 }, + { IPv4(166,177,111,0),24 }, + { IPv4(166,183,0,0),16 }, + { IPv4(166,184,0,0),16 }, + { IPv4(166,185,0,0),16 }, + { IPv4(166,186,0,0),16 }, + { IPv4(166,187,0,0),16 }, + { IPv4(166,188,0,0),16 }, + { IPv4(166,189,0,0),16 }, + { IPv4(166,190,0,0),16 }, + { IPv4(166,191,0,0),16 }, + { IPv4(166,192,0,0),16 }, + { IPv4(166,193,0,0),16 }, + { IPv4(166,194,0,0),16 }, + { IPv4(166,195,0,0),16 }, + { IPv4(166,196,0,0),16 }, + { IPv4(166,197,0,0),16 }, + { IPv4(166,198,0,0),16 }, + { IPv4(166,199,0,0),16 }, + { IPv4(166,200,0,0),16 }, + { IPv4(166,201,0,0),16 }, + { IPv4(166,202,0,0),16 }, + { IPv4(166,203,0,0),16 }, + { IPv4(166,204,0,0),16 }, + { IPv4(166,213,0,0),16 }, + { IPv4(167,1,0,0),16 }, + { IPv4(167,1,100,0),24 }, + { IPv4(167,1,101,0),24 }, + { IPv4(167,1,102,0),24 }, + { IPv4(167,1,103,0),24 }, + { IPv4(167,1,104,0),24 }, + { IPv4(167,1,105,0),24 }, + { IPv4(167,1,106,0),24 }, + { IPv4(167,1,107,0),24 }, + { IPv4(167,1,108,0),24 }, + { IPv4(167,1,109,0),24 }, + { IPv4(167,1,110,0),24 }, + { IPv4(167,1,112,0),24 }, + { IPv4(167,1,113,0),24 }, + { IPv4(167,1,118,0),24 }, + { IPv4(167,1,120,0),24 }, + { IPv4(167,1,122,0),24 }, + { IPv4(167,1,123,0),24 }, + { IPv4(167,1,124,0),24 }, + { IPv4(167,1,125,0),24 }, + { IPv4(167,1,127,0),24 }, + { IPv4(167,1,128,0),24 }, + { IPv4(167,1,129,0),24 }, + { IPv4(167,1,130,0),24 }, + { IPv4(167,1,131,0),24 }, + { IPv4(167,1,132,0),24 }, + { IPv4(167,1,133,0),24 }, + { IPv4(167,1,134,0),24 }, + { IPv4(167,1,135,0),24 }, + { IPv4(167,1,136,0),24 }, + { IPv4(167,1,141,0),24 }, + { IPv4(167,6,0,0),16 }, + { IPv4(167,7,0,0),16 }, + { IPv4(167,14,48,0),21 }, + { IPv4(167,23,0,0),16 }, + { IPv4(167,24,0,0),16 }, + { IPv4(167,24,101,0),24 }, + { IPv4(167,24,102,0),24 }, + { 
IPv4(167,24,103,0),24 }, + { IPv4(167,24,104,0),24 }, + { IPv4(167,24,105,0),24 }, + { IPv4(167,24,241,0),24 }, + { IPv4(167,24,242,0),24 }, + { IPv4(167,24,243,0),24 }, + { IPv4(167,24,244,0),24 }, + { IPv4(167,24,245,0),24 }, + { IPv4(167,25,0,0),16 }, + { IPv4(167,28,0,0),16 }, + { IPv4(167,28,10,0),24 }, + { IPv4(167,28,11,0),24 }, + { IPv4(167,28,15,0),24 }, + { IPv4(167,28,27,0),24 }, + { IPv4(167,28,28,0),24 }, + { IPv4(167,28,29,0),24 }, + { IPv4(167,28,32,0),24 }, + { IPv4(167,28,33,0),24 }, + { IPv4(167,28,37,0),24 }, + { IPv4(167,28,39,0),24 }, + { IPv4(167,28,49,0),24 }, + { IPv4(167,28,51,0),24 }, + { IPv4(167,28,52,0),24 }, + { IPv4(167,28,54,0),24 }, + { IPv4(167,28,73,0),24 }, + { IPv4(167,28,74,0),24 }, + { IPv4(167,28,92,0),24 }, + { IPv4(167,28,141,0),24 }, + { IPv4(167,28,203,0),24 }, + { IPv4(167,33,0,0),16 }, + { IPv4(167,33,21,0),24 }, + { IPv4(167,33,61,0),24 }, + { IPv4(167,33,63,0),24 }, + { IPv4(167,64,0,0),16 }, + { IPv4(167,64,43,0),24 }, + { IPv4(167,64,48,0),24 }, + { IPv4(167,64,57,0),24 }, + { IPv4(167,64,85,0),24 }, + { IPv4(167,66,0,0),16 }, + { IPv4(167,68,0,0),16 }, + { IPv4(167,77,36,0),24 }, + { IPv4(167,79,0,0),16 }, + { IPv4(167,80,246,0),24 }, + { IPv4(167,82,0,0),16 }, + { IPv4(167,83,0,0),16 }, + { IPv4(167,83,96,0),24 }, + { IPv4(167,83,98,0),24 }, + { IPv4(167,83,101,0),24 }, + { IPv4(167,86,0,0),16 }, + { IPv4(167,86,20,0),24 }, + { IPv4(167,86,34,0),24 }, + { IPv4(167,86,48,0),24 }, + { IPv4(167,86,60,0),24 }, + { IPv4(167,86,76,0),24 }, + { IPv4(167,86,98,0),24 }, + { IPv4(167,86,100,0),24 }, + { IPv4(167,89,0,0),16 }, + { IPv4(167,94,0,0),16 }, + { IPv4(167,98,0,0),16 }, + { IPv4(167,107,0,0),16 }, + { IPv4(167,115,0,0),16 }, + { IPv4(167,115,0,0),17 }, + { IPv4(167,120,0,0),16 }, + { IPv4(167,121,0,0),16 }, + { IPv4(167,127,0,0),16 }, + { IPv4(167,127,160,0),21 }, + { IPv4(167,132,0,0),16 }, + { IPv4(167,136,0,0),16 }, + { IPv4(167,136,25,0),24 }, + { IPv4(167,136,35,0),24 }, + { IPv4(167,136,225,0),24 }, + { IPv4(167,136,235,0),24 }, + { IPv4(167,140,0,0),16 }, + { IPv4(167,142,0,0),16 }, + { IPv4(167,147,0,0),16 }, + { IPv4(167,150,0,0),16 }, + { IPv4(167,151,0,0),16 }, + { IPv4(167,153,0,0),16 }, + { IPv4(167,154,0,0),16 }, + { IPv4(167,157,0,0),16 }, + { IPv4(167,160,188,0),23 }, + { IPv4(167,160,212,0),24 }, + { IPv4(167,160,246,0),24 }, + { IPv4(167,160,247,0),24 }, + { IPv4(167,165,0,0),16 }, + { IPv4(167,166,0,0),16 }, + { IPv4(167,167,0,0),16 }, + { IPv4(167,177,0,0),16 }, + { IPv4(167,185,0,0),16 }, + { IPv4(167,186,249,0),24 }, + { IPv4(167,187,0,0),16 }, + { IPv4(167,192,0,0),13 }, + { IPv4(167,200,0,0),16 }, + { IPv4(167,211,0,0),16 }, + { IPv4(167,216,0,0),17 }, + { IPv4(167,216,0,0),16 }, + { IPv4(167,216,128,0),17 }, + { IPv4(167,230,42,0),24 }, + { IPv4(167,232,0,0),16 }, + { IPv4(167,234,0,0),16 }, + { IPv4(167,236,0,0),16 }, + { IPv4(167,239,0,0),16 }, + { IPv4(167,239,176,0),24 }, + { IPv4(167,239,192,0),20 }, + { IPv4(167,239,208,0),20 }, + { IPv4(167,242,0,0),16 }, + { IPv4(167,248,0,0),16 }, + { IPv4(167,252,0,0),16 }, + { IPv4(167,253,0,0),16 }, + { IPv4(168,16,0,0),15 }, + { IPv4(168,18,0,0),15 }, + { IPv4(168,20,0,0),15 }, + { IPv4(168,22,0,0),15 }, + { IPv4(168,24,0,0),15 }, + { IPv4(168,26,0,0),15 }, + { IPv4(168,28,0,0),15 }, + { IPv4(168,30,0,0),15 }, + { IPv4(168,32,0,0),12 }, + { IPv4(168,48,0,0),13 }, + { IPv4(168,56,0,0),14 }, + { IPv4(168,60,0,0),16 }, + { IPv4(168,69,0,0),16 }, + { IPv4(168,73,128,0),17 }, + { IPv4(168,75,0,0),24 }, + { IPv4(168,75,0,0),18 }, + { IPv4(168,75,0,0),16 }, + { 
IPv4(168,78,0,0),16 }, + { IPv4(168,80,0,0),15 }, + { IPv4(168,84,0,0),16 }, + { IPv4(168,88,224,0),24 }, + { IPv4(168,89,0,0),16 }, + { IPv4(168,95,0,0),16 }, + { IPv4(168,97,0,0),16 }, + { IPv4(168,97,0,0),17 }, + { IPv4(168,97,128,0),17 }, + { IPv4(168,100,0,0),16 }, + { IPv4(168,111,0,0),16 }, + { IPv4(168,115,0,0),16 }, + { IPv4(168,120,0,0),16 }, + { IPv4(168,126,0,0),16 }, + { IPv4(168,126,27,0),24 }, + { IPv4(168,126,60,0),24 }, + { IPv4(168,126,61,0),24 }, + { IPv4(168,126,120,0),24 }, + { IPv4(168,126,120,0),22 }, + { IPv4(168,126,121,0),24 }, + { IPv4(168,126,122,0),24 }, + { IPv4(168,126,123,0),24 }, + { IPv4(168,126,167,0),24 }, + { IPv4(168,126,212,0),24 }, + { IPv4(168,131,0,0),16 }, + { IPv4(168,135,0,0),16 }, + { IPv4(168,142,0,0),16 }, + { IPv4(168,143,0,0),16 }, + { IPv4(168,149,0,0),16 }, + { IPv4(168,151,0,0),16 }, + { IPv4(168,154,0,0),16 }, + { IPv4(168,158,0,0),16 }, + { IPv4(168,160,0,0),16 }, + { IPv4(168,164,0,0),16 }, + { IPv4(168,165,0,0),16 }, + { IPv4(168,166,0,0),16 }, + { IPv4(168,167,0,0),16 }, + { IPv4(168,168,0,0),16 }, + { IPv4(168,170,0,0),16 }, + { IPv4(168,171,0,0),16 }, + { IPv4(168,172,0,0),16 }, + { IPv4(168,173,0,0),16 }, + { IPv4(168,174,0,0),16 }, + { IPv4(168,177,0,0),16 }, + { IPv4(168,178,0,0),16 }, + { IPv4(168,179,0,0),16 }, + { IPv4(168,180,0,0),16 }, + { IPv4(168,183,0,0),16 }, + { IPv4(168,186,0,0),16 }, + { IPv4(168,188,0,0),16 }, + { IPv4(168,200,0,0),16 }, + { IPv4(168,200,2,0),24 }, + { IPv4(168,203,0,0),16 }, + { IPv4(168,205,0,0),16 }, + { IPv4(168,208,0,0),16 }, + { IPv4(168,209,0,0),16 }, + { IPv4(168,210,0,0),16 }, + { IPv4(168,210,1,0),24 }, + { IPv4(168,210,40,0),23 }, + { IPv4(168,210,50,0),24 }, + { IPv4(168,210,68,0),22 }, + { IPv4(168,210,100,0),24 }, + { IPv4(168,210,128,0),17 }, + { IPv4(168,215,81,0),24 }, + { IPv4(168,215,104,0),24 }, + { IPv4(168,215,106,0),23 }, + { IPv4(168,215,108,0),23 }, + { IPv4(168,215,113,0),24 }, + { IPv4(168,215,167,0),24 }, + { IPv4(168,215,224,0),20 }, + { IPv4(168,215,234,0),23 }, + { IPv4(168,215,236,0),23 }, + { IPv4(168,220,0,0),16 }, + { IPv4(168,224,1,0),24 }, + { IPv4(168,226,0,0),16 }, + { IPv4(168,230,0,0),16 }, + { IPv4(168,230,128,0),19 }, + { IPv4(168,231,0,0),16 }, + { IPv4(168,234,52,0),24 }, + { IPv4(168,234,53,0),24 }, + { IPv4(168,234,54,0),24 }, + { IPv4(168,234,55,0),24 }, + { IPv4(168,234,56,0),24 }, + { IPv4(168,234,57,0),24 }, + { IPv4(168,234,58,0),24 }, + { IPv4(168,234,59,0),24 }, + { IPv4(168,234,60,0),24 }, + { IPv4(168,234,61,0),24 }, + { IPv4(168,234,62,0),24 }, + { IPv4(168,234,92,0),24 }, + { IPv4(168,241,0,0),16 }, + { IPv4(168,243,80,0),24 }, + { IPv4(168,243,81,0),24 }, + { IPv4(168,243,176,0),21 }, + { IPv4(168,243,184,0),21 }, + { IPv4(168,243,224,0),20 }, + { IPv4(168,243,231,0),24 }, + { IPv4(168,244,0,0),16 }, + { IPv4(168,248,0,0),15 }, + { IPv4(169,4,0,0),14 }, + { IPv4(169,71,20,0),24 }, + { IPv4(169,71,80,0),24 }, + { IPv4(169,71,97,0),24 }, + { IPv4(169,100,0,0),19 }, + { IPv4(169,130,0,0),16 }, + { IPv4(169,131,0,0),16 }, + { IPv4(169,132,0,0),16 }, + { IPv4(169,133,0,0),16 }, + { IPv4(169,138,0,0),16 }, + { IPv4(169,140,0,0),18 }, + { IPv4(169,140,64,0),18 }, + { IPv4(169,140,128,0),18 }, + { IPv4(169,140,192,0),18 }, + { IPv4(169,142,0,0),16 }, + { IPv4(169,144,0,0),16 }, + { IPv4(169,146,0,0),16 }, + { IPv4(169,149,0,0),16 }, + { IPv4(169,150,0,0),16 }, + { IPv4(169,152,0,0),16 }, + { IPv4(169,153,128,0),24 }, + { IPv4(169,153,130,0),24 }, + { IPv4(169,153,134,0),24 }, + { IPv4(169,154,0,0),16 }, + { IPv4(169,155,0,0),16 }, + { 
IPv4(169,156,0,0),16 }, + { IPv4(169,157,0,0),16 }, + { IPv4(169,197,0,0),18 }, + { IPv4(169,205,0,0),16 }, + { IPv4(169,206,0,0),16 }, + { IPv4(169,226,0,0),16 }, + { IPv4(169,228,64,0),19 }, + { IPv4(169,228,112,0),20 }, + { IPv4(169,228,128,0),19 }, + { IPv4(169,228,160,0),20 }, + { IPv4(169,229,0,0),16 }, + { IPv4(169,232,0,0),16 }, + { IPv4(169,233,0,0),16 }, + { IPv4(169,237,0,0),16 }, + { IPv4(170,2,0,0),16 }, + { IPv4(170,3,0,0),16 }, + { IPv4(170,5,0,0),16 }, + { IPv4(170,9,64,0),18 }, + { IPv4(170,9,192,0),18 }, + { IPv4(170,11,0,0),16 }, + { IPv4(170,16,0,0),17 }, + { IPv4(170,16,0,0),16 }, + { IPv4(170,16,0,0),23 }, + { IPv4(170,16,8,0),22 }, + { IPv4(170,16,14,0),23 }, + { IPv4(170,16,128,0),17 }, + { IPv4(170,17,0,0),16 }, + { IPv4(170,20,0,0),18 }, + { IPv4(170,20,64,0),19 }, + { IPv4(170,20,144,0),20 }, + { IPv4(170,20,160,0),19 }, + { IPv4(170,20,192,0),18 }, + { IPv4(170,22,0,0),16 }, + { IPv4(170,24,0,0),16 }, + { IPv4(170,25,0,0),16 }, + { IPv4(170,27,132,0),22 }, + { IPv4(170,27,201,0),24 }, + { IPv4(170,28,128,0),20 }, + { IPv4(170,31,0,0),16 }, + { IPv4(170,32,0,0),16 }, + { IPv4(170,35,0,0),16 }, + { IPv4(170,35,224,0),22 }, + { IPv4(170,35,228,0),22 }, + { IPv4(170,35,240,0),22 }, + { IPv4(170,35,244,0),22 }, + { IPv4(170,35,248,0),22 }, + { IPv4(170,35,252,0),22 }, + { IPv4(170,37,237,0),24 }, + { IPv4(170,37,238,0),24 }, + { IPv4(170,37,239,0),24 }, + { IPv4(170,38,0,0),16 }, + { IPv4(170,39,0,0),16 }, + { IPv4(170,46,0,0),16 }, + { IPv4(170,51,255,0),24 }, + { IPv4(170,54,0,0),16 }, + { IPv4(170,54,59,0),24 }, + { IPv4(170,54,240,0),24 }, + { IPv4(170,54,241,0),24 }, + { IPv4(170,55,0,0),16 }, + { IPv4(170,65,122,0),24 }, + { IPv4(170,65,123,0),24 }, + { IPv4(170,65,124,0),24 }, + { IPv4(170,65,128,0),21 }, + { IPv4(170,68,0,0),16 }, + { IPv4(170,70,0,0),16 }, + { IPv4(170,72,0,0),16 }, + { IPv4(170,85,0,0),16 }, + { IPv4(170,91,128,0),18 }, + { IPv4(170,92,0,0),16 }, + { IPv4(170,107,0,0),16 }, + { IPv4(170,108,0,0),16 }, + { IPv4(170,119,0,0),16 }, + { IPv4(170,121,0,0),16 }, + { IPv4(170,128,0,0),16 }, + { IPv4(170,128,170,0),24 }, + { IPv4(170,128,175,0),24 }, + { IPv4(170,131,0,0),19 }, + { IPv4(170,135,0,0),16 }, + { IPv4(170,138,0,0),16 }, + { IPv4(170,138,64,0),21 }, + { IPv4(170,140,0,0),16 }, + { IPv4(170,147,0,0),16 }, + { IPv4(170,152,0,0),16 }, + { IPv4(170,153,0,0),16 }, + { IPv4(170,153,0,0),17 }, + { IPv4(170,153,138,0),23 }, + { IPv4(170,153,140,0),22 }, + { IPv4(170,153,144,0),20 }, + { IPv4(170,153,160,0),19 }, + { IPv4(170,153,192,0),18 }, + { IPv4(170,159,0,0),16 }, + { IPv4(170,160,0,0),16 }, + { IPv4(170,161,0,0),16 }, + { IPv4(170,163,0,0),16 }, + { IPv4(170,165,0,0),16 }, + { IPv4(170,167,0,0),16 }, + { IPv4(170,169,46,0),24 }, + { IPv4(170,169,122,0),24 }, + { IPv4(170,178,0,0),16 }, + { IPv4(170,201,0,0),16 }, + { IPv4(170,202,0,0),16 }, + { IPv4(170,202,1,0),24 }, + { IPv4(170,202,3,0),24 }, + { IPv4(170,202,224,0),19 }, + { IPv4(170,202,224,0),24 }, + { IPv4(170,202,224,0),20 }, + { IPv4(170,202,231,0),24 }, + { IPv4(170,202,232,0),24 }, + { IPv4(170,202,233,0),24 }, + { IPv4(170,202,234,0),24 }, + { IPv4(170,202,240,0),20 }, + { IPv4(170,202,241,0),24 }, + { IPv4(170,202,242,0),24 }, + { IPv4(170,202,243,0),24 }, + { IPv4(170,202,254,0),24 }, + { IPv4(170,206,0,0),16 }, + { IPv4(170,206,0,0),19 }, + { IPv4(170,206,32,0),19 }, + { IPv4(170,206,64,0),19 }, + { IPv4(170,206,96,0),19 }, + { IPv4(170,206,128,0),19 }, + { IPv4(170,206,160,0),19 }, + { IPv4(170,206,192,0),19 }, + { IPv4(170,206,224,0),23 }, + { IPv4(170,206,226,0),23 }, + 
{ IPv4(170,209,0,0),16 }, + { IPv4(170,210,16,0),21 }, + { IPv4(170,215,0,0),16 }, + { IPv4(170,215,0,0),18 }, + { IPv4(170,215,15,0),24 }, + { IPv4(170,215,16,0),20 }, + { IPv4(170,215,96,0),19 }, + { IPv4(170,215,128,0),20 }, + { IPv4(170,215,134,0),24 }, + { IPv4(170,215,144,0),24 }, + { IPv4(170,215,145,0),24 }, + { IPv4(170,215,147,0),24 }, + { IPv4(170,215,159,0),24 }, + { IPv4(170,215,160,0),24 }, + { IPv4(170,215,161,0),24 }, + { IPv4(170,215,162,0),24 }, + { IPv4(170,215,163,0),24 }, + { IPv4(170,215,164,0),24 }, + { IPv4(170,215,171,0),24 }, + { IPv4(170,215,175,0),24 }, + { IPv4(170,215,177,0),24 }, + { IPv4(170,215,179,0),24 }, + { IPv4(170,215,184,0),24 }, + { IPv4(170,215,185,0),24 }, + { IPv4(170,215,186,0),24 }, + { IPv4(170,215,187,0),24 }, + { IPv4(170,215,188,0),24 }, + { IPv4(170,215,192,0),18 }, + { IPv4(170,224,0,0),16 }, + { IPv4(170,224,0,0),20 }, + { IPv4(170,224,16,0),20 }, + { IPv4(170,224,240,0),20 }, + { IPv4(170,235,0,0),16 }, + { IPv4(170,236,14,0),24 }, + { IPv4(170,248,95,0),24 }, + { IPv4(170,248,97,0),24 }, + { IPv4(170,250,0,0),16 }, + { IPv4(170,252,123,0),24 }, + { IPv4(170,252,127,0),24 }, + { IPv4(170,252,188,0),24 }, + { IPv4(170,252,191,0),24 }, + { IPv4(171,27,0,0),16 }, + { IPv4(171,30,128,0),17 }, + { IPv4(171,68,0,0),14 }, + { IPv4(171,72,0,0),16 }, + { IPv4(172,128,0,0),13 }, + { IPv4(172,136,0,0),13 }, + { IPv4(172,144,0,0),13 }, + { IPv4(172,152,0,0),13 }, + { IPv4(172,160,0,0),13 }, + { IPv4(172,168,0,0),13 }, + { IPv4(172,176,0,0),14 }, + { IPv4(172,176,0,0),13 }, + { IPv4(172,180,0,0),16 }, + { IPv4(172,180,0,0),14 }, + { IPv4(172,184,0,0),13 }, + { IPv4(172,187,128,0),17 }, + { IPv4(172,188,0,0),14 }, + { IPv4(192,0,32,0),20 }, + { IPv4(192,0,34,0),24 }, + { IPv4(192,0,36,0),24 }, + { IPv4(192,5,4,0),23 }, + { IPv4(192,5,6,0),24 }, + { IPv4(192,5,7,0),24 }, + { IPv4(192,5,14,0),24 }, + { IPv4(192,5,21,0),24 }, + { IPv4(192,5,22,0),24 }, + { IPv4(192,5,23,0),24 }, + { IPv4(192,5,24,0),24 }, + { IPv4(192,5,25,0),24 }, + { IPv4(192,5,27,0),24 }, + { IPv4(192,5,38,0),24 }, + { IPv4(192,5,41,0),24 }, + { IPv4(192,5,47,0),24 }, + { IPv4(192,5,53,0),24 }, + { IPv4(192,5,54,0),23 }, + { IPv4(192,5,55,0),24 }, + { IPv4(192,5,63,0),24 }, + { IPv4(192,5,73,0),24 }, + { IPv4(192,5,100,0),24 }, + { IPv4(192,5,106,0),24 }, + { IPv4(192,5,147,0),24 }, + { IPv4(192,5,148,0),24 }, + { IPv4(192,5,156,0),24 }, + { IPv4(192,5,157,0),24 }, + { IPv4(192,5,162,0),24 }, + { IPv4(192,5,166,0),24 }, + { IPv4(192,5,170,0),23 }, + { IPv4(192,5,172,0),22 }, + { IPv4(192,5,176,0),20 }, + { IPv4(192,5,192,0),21 }, + { IPv4(192,5,200,0),23 }, + { IPv4(192,5,220,0),24 }, + { IPv4(192,5,240,0),24 }, + { IPv4(192,6,2,0),24 }, + { IPv4(192,6,3,0),24 }, + { IPv4(192,6,6,0),24 }, + { IPv4(192,6,7,0),24 }, + { IPv4(192,6,19,0),24 }, + { IPv4(192,6,21,0),24 }, + { IPv4(192,6,23,0),24 }, + { IPv4(192,6,37,0),24 }, + { IPv4(192,6,38,0),24 }, + { IPv4(192,6,39,0),24 }, + { IPv4(192,6,41,0),24 }, + { IPv4(192,6,59,0),24 }, + { IPv4(192,6,71,0),24 }, + { IPv4(192,6,77,0),24 }, + { IPv4(192,6,86,0),24 }, + { IPv4(192,6,89,0),24 }, + { IPv4(192,6,118,0),24 }, + { IPv4(192,6,120,0),24 }, + { IPv4(192,6,121,0),24 }, + { IPv4(192,6,143,0),24 }, + { IPv4(192,6,151,0),24 }, + { IPv4(192,6,202,0),24 }, + { IPv4(192,6,223,0),24 }, + { IPv4(192,8,0,0),21 }, + { IPv4(192,11,236,0),24 }, + { IPv4(192,12,3,0),24 }, + { IPv4(192,12,5,0),24 }, + { IPv4(192,12,7,0),24 }, + { IPv4(192,12,10,0),24 }, + { IPv4(192,12,15,0),24 }, + { IPv4(192,12,29,0),24 }, + { IPv4(192,12,32,0),24 }, + { 
IPv4(192,12,33,0),24 }, + { IPv4(192,12,65,0),24 }, + { IPv4(192,12,66,0),24 }, + { IPv4(192,12,67,0),24 }, + { IPv4(192,12,68,0),24 }, + { IPv4(192,12,69,0),24 }, + { IPv4(192,12,73,0),24 }, + { IPv4(192,12,82,0),24 }, + { IPv4(192,12,88,0),24 }, + { IPv4(192,12,89,0),24 }, + { IPv4(192,12,90,0),24 }, + { IPv4(192,12,95,0),24 }, + { IPv4(192,12,100,0),24 }, + { IPv4(192,12,123,0),24 }, + { IPv4(192,12,124,0),24 }, + { IPv4(192,12,133,0),24 }, + { IPv4(192,12,134,0),24 }, + { IPv4(192,12,135,0),24 }, + { IPv4(192,12,207,0),24 }, + { IPv4(192,12,210,0),24 }, + { IPv4(192,12,211,0),24 }, + { IPv4(192,12,237,0),24 }, + { IPv4(192,12,240,0),24 }, + { IPv4(192,16,0,0),19 }, + { IPv4(192,16,13,0),24 }, + { IPv4(192,16,167,0),24 }, + { IPv4(192,16,168,0),24 }, + { IPv4(192,16,204,0),24 }, + { IPv4(192,17,0,0),16 }, + { IPv4(192,18,16,0),22 }, + { IPv4(192,19,192,0),22 }, + { IPv4(192,19,196,0),24 }, + { IPv4(192,19,197,0),24 }, + { IPv4(192,20,2,0),24 }, + { IPv4(192,20,3,0),24 }, + { IPv4(192,20,4,0),24 }, + { IPv4(192,20,8,0),24 }, + { IPv4(192,20,11,0),24 }, + { IPv4(192,20,16,0),24 }, + { IPv4(192,20,239,0),24 }, + { IPv4(192,20,245,0),24 }, + { IPv4(192,20,246,0),24 }, + { IPv4(192,20,250,0),24 }, + { IPv4(192,20,251,0),24 }, + { IPv4(192,20,252,0),24 }, + { IPv4(192,23,144,0),24 }, + { IPv4(192,23,168,0),24 }, + { IPv4(192,24,0,0),16 }, + { IPv4(192,25,42,0),24 }, + { IPv4(192,25,46,0),24 }, + { IPv4(192,25,48,0),24 }, + { IPv4(192,25,52,0),24 }, + { IPv4(192,25,91,0),24 }, + { IPv4(192,25,96,0),23 }, + { IPv4(192,25,106,0),24 }, + { IPv4(192,25,114,0),24 }, + { IPv4(192,25,133,0),24 }, + { IPv4(192,25,139,0),24 }, + { IPv4(192,25,140,0),23 }, + { IPv4(192,25,142,0),24 }, + { IPv4(192,25,151,0),24 }, + { IPv4(192,25,155,0),24 }, + { IPv4(192,25,191,0),24 }, + { IPv4(192,25,199,0),24 }, + { IPv4(192,25,204,0),24 }, + { IPv4(192,25,206,0),24 }, + { IPv4(192,25,214,0),24 }, + { IPv4(192,25,216,0),24 }, + { IPv4(192,25,218,0),24 }, + { IPv4(192,25,240,0),24 }, + { IPv4(192,26,10,0),24 }, + { IPv4(192,26,15,0),24 }, + { IPv4(192,26,85,0),24 }, + { IPv4(192,26,89,0),24 }, + { IPv4(192,26,91,0),24 }, + { IPv4(192,26,92,0),24 }, + { IPv4(192,26,147,0),24 }, + { IPv4(192,26,200,0),24 }, + { IPv4(192,26,212,0),24 }, + { IPv4(192,26,214,0),24 }, + { IPv4(192,26,244,0),23 }, + { IPv4(192,26,245,0),24 }, + { IPv4(192,26,251,0),24 }, + { IPv4(192,27,0,0),16 }, + { IPv4(192,27,56,0),24 }, + { IPv4(192,28,0,0),18 }, + { IPv4(192,28,64,0),19 }, + { IPv4(192,28,96,0),22 }, + { IPv4(192,28,254,0),24 }, + { IPv4(192,30,115,0),24 }, + { IPv4(192,31,3,0),24 }, + { IPv4(192,31,7,0),24 }, + { IPv4(192,31,16,0),24 }, + { IPv4(192,31,17,0),24 }, + { IPv4(192,31,18,0),24 }, + { IPv4(192,31,19,0),24 }, + { IPv4(192,31,20,0),24 }, + { IPv4(192,31,21,0),24 }, + { IPv4(192,31,31,0),24 }, + { IPv4(192,31,74,0),24 }, + { IPv4(192,31,80,0),24 }, + { IPv4(192,31,90,0),24 }, + { IPv4(192,31,96,0),24 }, + { IPv4(192,31,106,0),24 }, + { IPv4(192,31,112,0),24 }, + { IPv4(192,31,146,0),24 }, + { IPv4(192,31,153,0),24 }, + { IPv4(192,31,161,0),24 }, + { IPv4(192,31,174,0),24 }, + { IPv4(192,31,177,0),24 }, + { IPv4(192,31,178,0),24 }, + { IPv4(192,31,179,0),24 }, + { IPv4(192,31,238,0),23 }, + { IPv4(192,31,239,0),24 }, + { IPv4(192,31,246,0),24 }, + { IPv4(192,33,5,0),24 }, + { IPv4(192,33,6,0),23 }, + { IPv4(192,33,8,0),23 }, + { IPv4(192,33,10,0),24 }, + { IPv4(192,33,13,0),24 }, + { IPv4(192,33,14,0),24 }, + { IPv4(192,33,19,0),24 }, + { IPv4(192,33,140,0),23 }, + { IPv4(192,33,186,0),24 }, + { IPv4(192,33,240,0),24 }, + { 
IPv4(192,34,239,0),24 }, + { IPv4(192,35,20,0),24 }, + { IPv4(192,35,29,0),24 }, + { IPv4(192,35,44,0),24 }, + { IPv4(192,35,51,0),24 }, + { IPv4(192,35,75,0),24 }, + { IPv4(192,35,76,0),24 }, + { IPv4(192,35,82,0),24 }, + { IPv4(192,35,83,0),24 }, + { IPv4(192,35,84,0),24 }, + { IPv4(192,35,99,0),24 }, + { IPv4(192,35,105,0),24 }, + { IPv4(192,35,133,0),24 }, + { IPv4(192,35,142,0),24 }, + { IPv4(192,35,154,0),24 }, + { IPv4(192,35,156,0),24 }, + { IPv4(192,35,171,0),24 }, + { IPv4(192,35,174,0),24 }, + { IPv4(192,35,193,0),24 }, + { IPv4(192,35,208,0),24 }, + { IPv4(192,35,209,0),24 }, + { IPv4(192,35,210,0),24 }, + { IPv4(192,35,217,0),24 }, + { IPv4(192,35,218,0),24 }, + { IPv4(192,35,221,0),24 }, + { IPv4(192,35,222,0),24 }, + { IPv4(192,35,224,0),24 }, + { IPv4(192,35,225,0),24 }, + { IPv4(192,35,226,0),24 }, + { IPv4(192,35,227,0),24 }, + { IPv4(192,35,228,0),24 }, + { IPv4(192,36,95,0),24 }, + { IPv4(192,39,0,0),16 }, + { IPv4(192,39,122,0),24 }, + { IPv4(192,39,124,0),24 }, + { IPv4(192,40,16,0),22 }, + { IPv4(192,40,29,0),24 }, + { IPv4(192,40,65,0),24 }, + { IPv4(192,40,72,0),21 }, + { IPv4(192,40,80,0),24 }, + { IPv4(192,40,254,0),24 }, + { IPv4(192,41,0,0),18 }, + { IPv4(192,41,64,0),24 }, + { IPv4(192,41,70,0),24 }, + { IPv4(192,41,80,0),24 }, + { IPv4(192,41,162,0),24 }, + { IPv4(192,41,170,0),24 }, + { IPv4(192,41,197,0),24 }, + { IPv4(192,41,204,0),24 }, + { IPv4(192,41,206,0),24 }, + { IPv4(192,41,213,0),24 }, + { IPv4(192,41,214,0),24 }, + { IPv4(192,41,249,0),24 }, + { IPv4(192,42,41,0),24 }, + { IPv4(192,42,55,0),24 }, + { IPv4(192,42,70,0),24 }, + { IPv4(192,42,75,0),24 }, + { IPv4(192,42,76,0),24 }, + { IPv4(192,42,77,0),24 }, + { IPv4(192,42,78,0),24 }, + { IPv4(192,42,79,0),24 }, + { IPv4(192,42,80,0),23 }, + { IPv4(192,42,82,0),24 }, + { IPv4(192,42,93,0),24 }, + { IPv4(192,42,98,0),24 }, + { IPv4(192,42,99,0),24 }, + { IPv4(192,42,141,0),24 }, + { IPv4(192,42,142,0),24 }, + { IPv4(192,42,179,0),24 }, + { IPv4(192,42,181,0),24 }, + { IPv4(192,42,182,0),24 }, + { IPv4(192,42,238,0),24 }, + { IPv4(192,42,248,0),24 }, + { IPv4(192,43,64,0),18 }, + { IPv4(192,43,185,0),24 }, + { IPv4(192,43,197,0),24 }, + { IPv4(192,43,217,0),24 }, + { IPv4(192,43,219,0),24 }, + { IPv4(192,43,235,0),24 }, + { IPv4(192,43,240,0),24 }, + { IPv4(192,43,244,0),24 }, + { IPv4(192,43,253,0),24 }, + { IPv4(192,44,253,0),24 }, + { IPv4(192,45,155,0),24 }, + { IPv4(192,46,2,0),24 }, + { IPv4(192,46,4,0),24 }, + { IPv4(192,46,6,0),24 }, + { IPv4(192,46,47,0),24 }, + { IPv4(192,46,54,0),24 }, + { IPv4(192,46,108,0),24 }, + { IPv4(192,47,42,0),24 }, + { IPv4(192,47,44,0),24 }, + { IPv4(192,47,117,0),24 }, + { IPv4(192,47,241,0),24 }, + { IPv4(192,47,243,0),24 }, + { IPv4(192,48,33,0),24 }, + { IPv4(192,48,80,0),24 }, + { IPv4(192,48,97,0),24 }, + { IPv4(192,48,106,0),24 }, + { IPv4(192,48,125,0),24 }, + { IPv4(192,48,212,0),22 }, + { IPv4(192,48,222,0),24 }, + { IPv4(192,48,242,0),24 }, + { IPv4(192,48,245,0),24 }, + { IPv4(192,50,17,0),24 }, + { IPv4(192,50,65,0),24 }, + { IPv4(192,50,74,0),23 }, + { IPv4(192,50,76,0),23 }, + { IPv4(192,50,105,0),24 }, + { IPv4(192,50,110,0),24 }, + { IPv4(192,50,240,0),24 }, + { IPv4(192,51,41,0),24 }, + { IPv4(192,51,144,0),21 }, + { IPv4(192,51,180,0),22 }, + { IPv4(192,52,59,0),24 }, + { IPv4(192,52,83,0),24 }, + { IPv4(192,52,85,0),24 }, + { IPv4(192,52,86,0),24 }, + { IPv4(192,52,88,0),24 }, + { IPv4(192,52,89,0),24 }, + { IPv4(192,52,90,0),24 }, + { IPv4(192,52,91,0),24 }, + { IPv4(192,52,106,0),24 }, + { IPv4(192,52,117,0),24 }, + { IPv4(192,52,183,0),24 
}, + { IPv4(192,52,184,0),24 }, + { IPv4(192,52,220,0),24 }, + { IPv4(192,53,35,0),24 }, + { IPv4(192,54,36,0),24 }, + { IPv4(192,54,43,0),24 }, + { IPv4(192,54,45,0),24 }, + { IPv4(192,54,129,0),24 }, + { IPv4(192,54,250,0),24 }, + { IPv4(192,54,253,0),24 }, + { IPv4(192,55,1,0),24 }, + { IPv4(192,55,87,0),24 }, + { IPv4(192,55,90,0),23 }, + { IPv4(192,55,95,0),24 }, + { IPv4(192,55,106,0),24 }, + { IPv4(192,55,120,0),24 }, + { IPv4(192,55,122,0),24 }, + { IPv4(192,55,123,0),24 }, + { IPv4(192,55,124,0),24 }, + { IPv4(192,55,133,0),24 }, + { IPv4(192,55,137,0),24 }, + { IPv4(192,55,138,0),23 }, + { IPv4(192,55,140,0),22 }, + { IPv4(192,55,144,0),20 }, + { IPv4(192,55,160,0),20 }, + { IPv4(192,55,176,0),21 }, + { IPv4(192,55,184,0),23 }, + { IPv4(192,55,186,0),24 }, + { IPv4(192,55,199,0),24 }, + { IPv4(192,55,208,0),24 }, + { IPv4(192,55,210,0),24 }, + { IPv4(192,55,214,0),24 }, + { IPv4(192,55,229,0),24 }, + { IPv4(192,55,240,0),24 }, + { IPv4(192,56,52,0),24 }, + { IPv4(192,56,191,0),24 }, + { IPv4(192,56,231,0),24 }, + { IPv4(192,58,19,0),24 }, + { IPv4(192,58,24,0),23 }, + { IPv4(192,58,36,0),24 }, + { IPv4(192,58,107,0),24 }, + { IPv4(192,58,159,0),24 }, + { IPv4(192,58,172,0),24 }, + { IPv4(192,58,181,0),24 }, + { IPv4(192,58,183,0),24 }, + { IPv4(192,58,184,0),21 }, + { IPv4(192,58,199,0),24 }, + { IPv4(192,58,212,0),24 }, + { IPv4(192,58,220,0),24 }, + { IPv4(192,58,221,0),24 }, + { IPv4(192,58,222,0),24 }, + { IPv4(192,58,223,0),24 }, + { IPv4(192,58,244,0),24 }, + { IPv4(192,63,0,0),16 }, + { IPv4(192,64,157,0),24 }, + { IPv4(192,65,97,0),24 }, + { IPv4(192,65,141,0),24 }, + { IPv4(192,65,144,0),24 }, + { IPv4(192,65,146,0),24 }, + { IPv4(192,65,153,0),24 }, + { IPv4(192,65,171,0),24 }, + { IPv4(192,65,176,0),24 }, + { IPv4(192,65,201,0),24 }, + { IPv4(192,65,202,0),24 }, + { IPv4(192,65,224,0),24 }, + { IPv4(192,65,226,0),24 }, + { IPv4(192,65,228,0),24 }, + { IPv4(192,67,13,0),24 }, + { IPv4(192,67,14,0),24 }, + { IPv4(192,67,21,0),24 }, + { IPv4(192,67,45,0),24 }, + { IPv4(192,67,48,0),24 }, + { IPv4(192,67,53,0),24 }, + { IPv4(192,67,80,0),24 }, + { IPv4(192,67,81,0),24 }, + { IPv4(192,67,82,0),24 }, + { IPv4(192,67,83,0),24 }, + { IPv4(192,67,93,0),24 }, + { IPv4(192,67,96,0),24 }, + { IPv4(192,67,107,0),24 }, + { IPv4(192,67,108,0),24 }, + { IPv4(192,67,109,0),24 }, + { IPv4(192,67,112,0),24 }, + { IPv4(192,67,113,0),24 }, + { IPv4(192,67,157,0),24 }, + { IPv4(192,67,166,0),24 }, + { IPv4(192,67,173,0),24 }, + { IPv4(192,67,209,0),24 }, + { IPv4(192,67,236,0),22 }, + { IPv4(192,67,240,0),21 }, + { IPv4(192,67,251,0),24 }, + { IPv4(192,68,22,0),24 }, + { IPv4(192,68,52,0),24 }, + { IPv4(192,68,108,0),24 }, + { IPv4(192,68,148,0),24 }, + { IPv4(192,68,162,0),24 }, + { IPv4(192,68,171,0),24 }, + { IPv4(192,68,172,0),24 }, + { IPv4(192,68,183,0),24 }, + { IPv4(192,68,189,0),24 }, + { IPv4(192,68,202,0),24 }, + { IPv4(192,68,227,0),24 }, + { IPv4(192,69,46,0),24 }, + { IPv4(192,69,66,0),24 }, + { IPv4(192,69,190,0),24 }, + { IPv4(192,70,125,0),24 }, + { IPv4(192,70,160,0),24 }, + { IPv4(192,70,162,0),24 }, + { IPv4(192,70,175,0),24 }, + { IPv4(192,70,186,0),24 }, + { IPv4(192,70,204,0),24 }, + { IPv4(192,70,211,0),24 }, + { IPv4(192,70,231,0),24 }, + { IPv4(192,70,236,0),24 }, + { IPv4(192,70,237,0),24 }, + { IPv4(192,70,239,0),24 }, + { IPv4(192,70,244,0),24 }, + { IPv4(192,70,245,0),24 }, + { IPv4(192,70,249,0),24 }, + { IPv4(192,71,115,0),24 }, + { IPv4(192,71,129,0),24 }, + { IPv4(192,71,130,0),24 }, + { IPv4(192,71,199,0),24 }, + { IPv4(192,71,213,0),24 }, + { 
IPv4(192,72,0,0),16 }, + { IPv4(192,72,80,0),23 }, + { IPv4(192,73,3,0),24 }, + { IPv4(192,73,7,0),24 }, + { IPv4(192,73,25,0),24 }, + { IPv4(192,73,26,0),23 }, + { IPv4(192,73,28,0),23 }, + { IPv4(192,73,57,0),24 }, + { IPv4(192,73,60,0),24 }, + { IPv4(192,73,62,0),24 }, + { IPv4(192,73,64,0),24 }, + { IPv4(192,73,207,0),24 }, + { IPv4(192,73,208,0),22 }, + { IPv4(192,73,212,0),24 }, + { IPv4(192,73,213,0),24 }, + { IPv4(192,73,216,0),24 }, + { IPv4(192,73,220,0),24 }, + { IPv4(192,73,228,0),24 }, + { IPv4(192,74,216,0),24 }, + { IPv4(192,75,17,0),24 }, + { IPv4(192,75,48,0),24 }, + { IPv4(192,75,49,0),24 }, + { IPv4(192,75,99,0),24 }, + { IPv4(192,75,104,0),24 }, + { IPv4(192,75,120,0),22 }, + { IPv4(192,75,131,0),24 }, + { IPv4(192,75,134,0),24 }, + { IPv4(192,75,137,0),24 }, + { IPv4(192,75,178,0),24 }, + { IPv4(192,75,238,0),24 }, + { IPv4(192,76,121,0),24 }, + { IPv4(192,76,133,0),24 }, + { IPv4(192,76,151,0),24 }, + { IPv4(192,76,175,0),24 }, + { IPv4(192,76,177,0),24 }, + { IPv4(192,76,178,0),24 }, + { IPv4(192,76,184,0),24 }, + { IPv4(192,76,237,0),24 }, + { IPv4(192,76,238,0),24 }, + { IPv4(192,76,239,0),24 }, + { IPv4(192,76,249,0),24 }, + { IPv4(192,77,9,0),24 }, + { IPv4(192,77,14,0),24 }, + { IPv4(192,77,27,0),24 }, + { IPv4(192,77,30,0),24 }, + { IPv4(192,77,31,0),24 }, + { IPv4(192,77,32,0),24 }, + { IPv4(192,77,33,0),24 }, + { IPv4(192,77,36,0),24 }, + { IPv4(192,77,40,0),24 }, + { IPv4(192,77,43,0),24 }, + { IPv4(192,77,44,0),24 }, + { IPv4(192,77,45,0),24 }, + { IPv4(192,77,77,0),24 }, + { IPv4(192,77,84,0),24 }, + { IPv4(192,77,86,0),24 }, + { IPv4(192,77,87,0),24 }, + { IPv4(192,77,88,0),24 }, + { IPv4(192,77,95,0),24 }, + { IPv4(192,77,147,0),24 }, + { IPv4(192,77,161,0),24 }, + { IPv4(192,77,173,0),24 }, + { IPv4(192,77,175,0),24 }, + { IPv4(192,77,198,0),24 }, + { IPv4(192,77,205,0),24 }, + { IPv4(192,77,209,0),24 }, + { IPv4(192,77,210,0),24 }, + { IPv4(192,78,99,0),24 }, + { IPv4(192,79,238,0),24 }, + { IPv4(192,80,12,0),22 }, + { IPv4(192,80,16,0),24 }, + { IPv4(192,80,17,0),24 }, + { IPv4(192,80,29,0),24 }, + { IPv4(192,80,30,0),24 }, + { IPv4(192,80,43,0),24 }, + { IPv4(192,80,64,0),24 }, + { IPv4(192,80,68,0),24 }, + { IPv4(192,80,211,0),24 }, + { IPv4(192,81,48,0),24 }, + { IPv4(192,81,67,0),24 }, + { IPv4(192,81,68,0),24 }, + { IPv4(192,81,69,0),24 }, + { IPv4(192,82,0,0),19 }, + { IPv4(192,82,104,0),24 }, + { IPv4(192,82,113,0),24 }, + { IPv4(192,82,115,0),24 }, + { IPv4(192,82,118,0),24 }, + { IPv4(192,82,122,0),24 }, + { IPv4(192,82,142,0),24 }, + { IPv4(192,83,111,0),24 }, + { IPv4(192,83,119,0),24 }, + { IPv4(192,83,159,0),24 }, + { IPv4(192,83,166,0),23 }, + { IPv4(192,83,168,0),21 }, + { IPv4(192,83,171,0),24 }, + { IPv4(192,83,176,0),24 }, + { IPv4(192,83,176,0),20 }, + { IPv4(192,83,180,0),24 }, + { IPv4(192,83,192,0),24 }, + { IPv4(192,83,192,0),22 }, + { IPv4(192,83,196,0),24 }, + { IPv4(192,83,203,0),24 }, + { IPv4(192,83,224,0),24 }, + { IPv4(192,83,228,0),24 }, + { IPv4(192,83,232,0),24 }, + { IPv4(192,83,242,0),24 }, + { IPv4(192,83,246,0),24 }, + { IPv4(192,83,249,0),24 }, + { IPv4(192,83,253,0),24 }, + { IPv4(192,84,8,0),24 }, + { IPv4(192,84,20,0),24 }, + { IPv4(192,84,22,0),24 }, + { IPv4(192,84,88,0),24 }, + { IPv4(192,84,119,0),24 }, + { IPv4(192,84,122,0),23 }, + { IPv4(192,84,171,0),24 }, + { IPv4(192,84,218,0),24 }, + { IPv4(192,84,221,0),24 }, + { IPv4(192,84,243,0),24 }, + { IPv4(192,84,252,0),24 }, + { IPv4(192,85,16,0),23 }, + { IPv4(192,85,241,0),24 }, + { IPv4(192,85,242,0),24 }, + { IPv4(192,86,6,0),24 }, + { 
IPv4(192,86,7,0),24 }, + { IPv4(192,86,8,0),24 }, + { IPv4(192,86,9,0),24 }, + { IPv4(192,86,19,0),24 }, + { IPv4(192,86,20,0),24 }, + { IPv4(192,86,21,0),24 }, + { IPv4(192,86,22,0),24 }, + { IPv4(192,86,66,0),24 }, + { IPv4(192,86,70,0),24 }, + { IPv4(192,86,71,0),24 }, + { IPv4(192,86,72,0),22 }, + { IPv4(192,86,77,0),24 }, + { IPv4(192,86,78,0),24 }, + { IPv4(192,86,80,0),24 }, + { IPv4(192,86,93,0),24 }, + { IPv4(192,86,96,0),24 }, + { IPv4(192,86,110,0),24 }, + { IPv4(192,86,112,0),21 }, + { IPv4(192,86,126,0),24 }, + { IPv4(192,86,139,0),24 }, + { IPv4(192,86,226,0),24 }, + { IPv4(192,86,228,0),24 }, + { IPv4(192,86,230,0),24 }, + { IPv4(192,86,232,0),21 }, + { IPv4(192,86,253,0),24 }, + { IPv4(192,87,176,0),24 }, + { IPv4(192,88,11,0),24 }, + { IPv4(192,88,26,0),24 }, + { IPv4(192,88,42,0),24 }, + { IPv4(192,88,87,0),24 }, + { IPv4(192,88,99,0),24 }, + { IPv4(192,88,110,0),24 }, + { IPv4(192,88,111,0),24 }, + { IPv4(192,88,112,0),24 }, + { IPv4(192,88,114,0),24 }, + { IPv4(192,88,115,0),24 }, + { IPv4(192,88,201,0),24 }, + { IPv4(192,88,205,0),24 }, + { IPv4(192,88,209,0),24 }, + { IPv4(192,88,210,0),24 }, + { IPv4(192,88,212,0),24 }, + { IPv4(192,88,248,0),24 }, + { IPv4(192,91,73,0),24 }, + { IPv4(192,91,75,0),24 }, + { IPv4(192,91,137,0),24 }, + { IPv4(192,91,138,0),24 }, + { IPv4(192,91,152,0),24 }, + { IPv4(192,91,154,0),24 }, + { IPv4(192,91,159,0),24 }, + { IPv4(192,91,171,0),24 }, + { IPv4(192,91,198,0),24 }, + { IPv4(192,91,201,0),24 }, + { IPv4(192,91,205,0),24 }, + { IPv4(192,92,22,0),24 }, + { IPv4(192,92,30,0),24 }, + { IPv4(192,92,56,0),24 }, + { IPv4(192,92,62,0),24 }, + { IPv4(192,92,63,0),24 }, + { IPv4(192,92,78,0),24 }, + { IPv4(192,92,83,0),24 }, + { IPv4(192,92,90,0),24 }, + { IPv4(192,92,92,0),24 }, + { IPv4(192,92,112,0),24 }, + { IPv4(192,92,115,0),24 }, + { IPv4(192,92,159,0),24 }, + { IPv4(192,92,167,0),24 }, + { IPv4(192,92,168,0),24 }, + { IPv4(192,92,199,0),24 }, + { IPv4(192,94,9,0),24 }, + { IPv4(192,94,38,0),23 }, + { IPv4(192,94,40,0),24 }, + { IPv4(192,94,41,0),24 }, + { IPv4(192,94,47,0),24 }, + { IPv4(192,94,52,0),24 }, + { IPv4(192,94,54,0),24 }, + { IPv4(192,94,59,0),24 }, + { IPv4(192,94,60,0),24 }, + { IPv4(192,94,61,0),24 }, + { IPv4(192,94,65,0),24 }, + { IPv4(192,94,67,0),24 }, + { IPv4(192,94,75,0),24 }, + { IPv4(192,94,94,0),24 }, + { IPv4(192,94,118,0),24 }, + { IPv4(192,94,202,0),24 }, + { IPv4(192,94,210,0),24 }, + { IPv4(192,94,233,0),24 }, + { IPv4(192,94,241,0),24 }, + { IPv4(192,94,242,0),24 }, + { IPv4(192,94,249,0),24 }, + { IPv4(192,96,1,0),24 }, + { IPv4(192,96,2,0),24 }, + { IPv4(192,96,3,0),24 }, + { IPv4(192,96,5,0),24 }, + { IPv4(192,96,6,0),23 }, + { IPv4(192,96,7,0),24 }, + { IPv4(192,96,8,0),23 }, + { IPv4(192,96,8,0),24 }, + { IPv4(192,96,10,0),24 }, + { IPv4(192,96,11,0),24 }, + { IPv4(192,96,12,0),24 }, + { IPv4(192,96,13,0),24 }, + { IPv4(192,96,14,0),24 }, + { IPv4(192,96,15,0),24 }, + { IPv4(192,96,20,0),23 }, + { IPv4(192,96,34,0),24 }, + { IPv4(192,96,36,0),23 }, + { IPv4(192,96,38,0),24 }, + { IPv4(192,96,46,0),24 }, + { IPv4(192,96,57,0),24 }, + { IPv4(192,96,74,0),24 }, + { IPv4(192,96,79,0),24 }, + { IPv4(192,96,80,0),22 }, + { IPv4(192,96,84,0),23 }, + { IPv4(192,96,89,0),24 }, + { IPv4(192,96,90,0),23 }, + { IPv4(192,96,92,0),24 }, + { IPv4(192,96,94,0),23 }, + { IPv4(192,96,106,0),24 }, + { IPv4(192,96,109,0),24 }, + { IPv4(192,96,120,0),21 }, + { IPv4(192,96,128,0),22 }, + { IPv4(192,96,133,0),24 }, + { IPv4(192,96,134,0),24 }, + { IPv4(192,96,135,0),24 }, + { IPv4(192,96,136,0),23 }, + { 
IPv4(192,96,139,0),24 }, + { IPv4(192,96,140,0),24 }, + { IPv4(192,96,142,0),24 }, + { IPv4(192,96,143,0),24 }, + { IPv4(192,96,145,0),24 }, + { IPv4(192,96,150,0),24 }, + { IPv4(192,96,193,0),24 }, + { IPv4(192,96,194,0),24 }, + { IPv4(192,96,246,0),24 }, + { IPv4(192,96,247,0),24 }, + { IPv4(192,96,248,0),23 }, + { IPv4(192,96,251,0),24 }, + { IPv4(192,96,252,0),24 }, + { IPv4(192,97,38,0),24 }, + { IPv4(192,100,1,0),24 }, + { IPv4(192,100,2,0),24 }, + { IPv4(192,100,4,0),24 }, + { IPv4(192,100,5,0),24 }, + { IPv4(192,100,9,0),24 }, + { IPv4(192,100,12,0),24 }, + { IPv4(192,100,16,0),24 }, + { IPv4(192,100,53,0),24 }, + { IPv4(192,100,55,0),24 }, + { IPv4(192,100,59,0),24 }, + { IPv4(192,100,65,0),24 }, + { IPv4(192,100,69,0),24 }, + { IPv4(192,100,70,0),24 }, + { IPv4(192,100,91,0),24 }, + { IPv4(192,100,92,0),24 }, + { IPv4(192,100,158,0),24 }, + { IPv4(192,100,161,0),24 }, + { IPv4(192,100,162,0),24 }, + { IPv4(192,100,163,0),24 }, + { IPv4(192,100,164,0),24 }, + { IPv4(192,100,165,0),24 }, + { IPv4(192,100,170,0),24 }, + { IPv4(192,100,172,0),24 }, + { IPv4(192,100,174,0),24 }, + { IPv4(192,100,176,0),24 }, + { IPv4(192,100,179,0),24 }, + { IPv4(192,100,180,0),24 }, + { IPv4(192,100,181,0),24 }, + { IPv4(192,100,183,0),24 }, + { IPv4(192,100,189,0),24 }, + { IPv4(192,100,190,0),24 }, + { IPv4(192,100,193,0),24 }, + { IPv4(192,100,194,0),24 }, + { IPv4(192,100,195,0),24 }, + { IPv4(192,100,196,0),24 }, + { IPv4(192,100,199,0),24 }, + { IPv4(192,100,200,0),24 }, + { IPv4(192,100,201,0),24 }, + { IPv4(192,100,204,0),24 }, + { IPv4(192,100,208,0),24 }, + { IPv4(192,100,212,0),24 }, + { IPv4(192,100,213,0),24 }, + { IPv4(192,100,218,0),24 }, + { IPv4(192,100,220,0),24 }, + { IPv4(192,100,221,0),24 }, + { IPv4(192,100,230,0),24 }, + { IPv4(192,100,234,0),24 }, + { IPv4(192,101,17,0),24 }, + { IPv4(192,101,31,0),24 }, + { IPv4(192,101,34,0),24 }, + { IPv4(192,101,42,0),24 }, + { IPv4(192,101,44,0),24 }, + { IPv4(192,101,77,0),24 }, + { IPv4(192,101,98,0),24 }, + { IPv4(192,101,100,0),22 }, + { IPv4(192,101,104,0),22 }, + { IPv4(192,101,108,0),23 }, + { IPv4(192,101,120,0),21 }, + { IPv4(192,101,128,0),22 }, + { IPv4(192,101,132,0),23 }, + { IPv4(192,101,135,0),24 }, + { IPv4(192,101,136,0),24 }, + { IPv4(192,101,138,0),24 }, + { IPv4(192,101,141,0),24 }, + { IPv4(192,101,144,0),24 }, + { IPv4(192,101,148,0),24 }, + { IPv4(192,101,150,0),23 }, + { IPv4(192,101,190,0),24 }, + { IPv4(192,101,191,0),24 }, + { IPv4(192,102,9,0),24 }, + { IPv4(192,102,10,0),24 }, + { IPv4(192,102,12,0),24 }, + { IPv4(192,102,15,0),24 }, + { IPv4(192,102,44,0),24 }, + { IPv4(192,102,90,0),24 }, + { IPv4(192,102,190,0),23 }, + { IPv4(192,102,196,0),24 }, + { IPv4(192,102,197,0),24 }, + { IPv4(192,102,198,0),24 }, + { IPv4(192,102,199,0),24 }, + { IPv4(192,102,200,0),24 }, + { IPv4(192,102,201,0),24 }, + { IPv4(192,102,202,0),24 }, + { IPv4(192,102,216,0),24 }, + { IPv4(192,102,219,0),24 }, + { IPv4(192,102,226,0),24 }, + { IPv4(192,102,230,0),24 }, + { IPv4(192,102,231,0),24 }, + { IPv4(192,102,233,0),24 }, + { IPv4(192,102,234,0),24 }, + { IPv4(192,102,236,0),24 }, + { IPv4(192,102,243,0),24 }, + { IPv4(192,102,244,0),22 }, + { IPv4(192,102,249,0),24 }, + { IPv4(192,102,253,0),24 }, + { IPv4(192,103,8,0),24 }, + { IPv4(192,103,11,0),24 }, + { IPv4(192,103,13,0),24 }, + { IPv4(192,103,41,0),24 }, + { IPv4(192,103,148,0),23 }, + { IPv4(192,103,149,0),24 }, + { IPv4(192,103,151,0),24 }, + { IPv4(192,103,152,0),24 }, + { IPv4(192,103,154,0),24 }, + { IPv4(192,103,155,0),24 }, + { IPv4(192,103,156,0),22 }, + { 
IPv4(192,103,158,0),23 }, + { IPv4(192,103,160,0),23 }, + { IPv4(192,103,161,0),24 }, + { IPv4(192,103,162,0),24 }, + { IPv4(192,103,175,0),24 }, + { IPv4(192,103,176,0),24 }, + { IPv4(192,103,179,0),24 }, + { IPv4(192,103,180,0),22 }, + { IPv4(192,103,182,0),23 }, + { IPv4(192,103,184,0),22 }, + { IPv4(192,103,186,0),23 }, + { IPv4(192,103,188,0),24 }, + { IPv4(192,103,190,0),23 }, + { IPv4(192,103,191,0),24 }, + { IPv4(192,103,192,0),24 }, + { IPv4(192,103,194,0),23 }, + { IPv4(192,103,196,0),22 }, + { IPv4(192,103,198,0),23 }, + { IPv4(192,103,200,0),22 }, + { IPv4(192,103,202,0),23 }, + { IPv4(192,103,204,0),22 }, + { IPv4(192,103,208,0),23 }, + { IPv4(192,103,210,0),24 }, + { IPv4(192,103,229,0),24 }, + { IPv4(192,103,230,0),23 }, + { IPv4(192,103,232,0),22 }, + { IPv4(192,103,236,0),23 }, + { IPv4(192,103,237,0),24 }, + { IPv4(192,104,1,0),24 }, + { IPv4(192,104,15,0),24 }, + { IPv4(192,104,26,0),24 }, + { IPv4(192,104,65,0),24 }, + { IPv4(192,104,79,0),24 }, + { IPv4(192,104,107,0),24 }, + { IPv4(192,104,108,0),24 }, + { IPv4(192,104,109,0),24 }, + { IPv4(192,104,110,0),24 }, + { IPv4(192,104,153,0),24 }, + { IPv4(192,104,156,0),24 }, + { IPv4(192,104,166,0),24 }, + { IPv4(192,104,171,0),24 }, + { IPv4(192,104,179,0),24 }, + { IPv4(192,104,182,0),23 }, + { IPv4(192,104,186,0),24 }, + { IPv4(192,104,187,0),24 }, + { IPv4(192,104,191,0),24 }, + { IPv4(192,104,214,0),24 }, + { IPv4(192,104,244,0),24 }, + { IPv4(192,105,49,0),24 }, + { IPv4(192,105,254,0),24 }, + { IPv4(192,106,192,0),24 }, + { IPv4(192,107,3,0),24 }, + { IPv4(192,107,28,0),24 }, + { IPv4(192,107,41,0),24 }, + { IPv4(192,107,43,0),24 }, + { IPv4(192,107,44,0),24 }, + { IPv4(192,107,45,0),24 }, + { IPv4(192,107,46,0),24 }, + { IPv4(192,107,103,0),24 }, + { IPv4(192,107,108,0),24 }, + { IPv4(192,107,111,0),24 }, + { IPv4(192,107,123,0),24 }, + { IPv4(192,107,134,0),24 }, + { IPv4(192,107,165,0),24 }, + { IPv4(192,107,166,0),24 }, + { IPv4(192,107,167,0),24 }, + { IPv4(192,107,173,0),24 }, + { IPv4(192,107,175,0),24 }, + { IPv4(192,107,189,0),24 }, + { IPv4(192,107,190,0),24 }, + { IPv4(192,107,191,0),24 }, + { IPv4(192,107,193,0),24 }, + { IPv4(192,107,195,0),24 }, + { IPv4(192,107,196,0),24 }, + { IPv4(192,108,2,0),23 }, + { IPv4(192,108,4,0),22 }, + { IPv4(192,108,8,0),21 }, + { IPv4(192,108,19,0),24 }, + { IPv4(192,108,20,0),24 }, + { IPv4(192,108,21,0),24 }, + { IPv4(192,108,98,0),24 }, + { IPv4(192,108,104,0),24 }, + { IPv4(192,108,105,0),24 }, + { IPv4(192,108,106,0),24 }, + { IPv4(192,108,124,0),24 }, + { IPv4(192,108,176,0),21 }, + { IPv4(192,108,179,0),24 }, + { IPv4(192,108,184,0),24 }, + { IPv4(192,108,186,0),24 }, + { IPv4(192,108,192,0),24 }, + { IPv4(192,108,222,0),23 }, + { IPv4(192,108,225,0),24 }, + { IPv4(192,108,235,0),24 }, + { IPv4(192,108,243,0),24 }, + { IPv4(192,109,81,0),24 }, + { IPv4(192,109,142,0),24 }, + { IPv4(192,109,199,0),24 }, + { IPv4(192,109,213,0),24 }, + { IPv4(192,109,216,0),24 }, + { IPv4(192,110,64,0),20 }, + { IPv4(192,111,36,0),24 }, + { IPv4(192,111,47,0),24 }, + { IPv4(192,111,52,0),24 }, + { IPv4(192,111,53,0),24 }, + { IPv4(192,111,89,0),24 }, + { IPv4(192,111,104,0),24 }, + { IPv4(192,111,110,0),24 }, + { IPv4(192,111,116,0),23 }, + { IPv4(192,111,116,0),24 }, + { IPv4(192,111,121,0),24 }, + { IPv4(192,111,213,0),24 }, + { IPv4(192,111,219,0),24 }, + { IPv4(192,111,221,0),24 }, + { IPv4(192,111,225,0),24 }, + { IPv4(192,111,226,0),24 }, + { IPv4(192,111,227,0),24 }, + { IPv4(192,112,3,0),24 }, + { IPv4(192,112,4,0),24 }, + { IPv4(192,112,6,0),24 }, + { IPv4(192,112,10,0),24 
}, + { IPv4(192,112,12,0),24 }, + { IPv4(192,112,15,0),24 }, + { IPv4(192,112,22,0),24 }, + { IPv4(192,112,36,0),24 }, + { IPv4(192,112,38,0),24 }, + { IPv4(192,112,39,0),24 }, + { IPv4(192,112,40,0),22 }, + { IPv4(192,112,49,0),24 }, + { IPv4(192,112,50,0),24 }, + { IPv4(192,112,63,0),24 }, + { IPv4(192,112,68,0),24 }, + { IPv4(192,112,84,0),24 }, + { IPv4(192,112,138,0),24 }, + { IPv4(192,112,139,0),24 }, + { IPv4(192,112,223,0),24 }, + { IPv4(192,112,224,0),24 }, + { IPv4(192,112,225,0),24 }, + { IPv4(192,112,230,0),24 }, + { IPv4(192,112,238,0),24 }, + { IPv4(192,112,239,0),24 }, + { IPv4(192,114,10,0),24 }, + { IPv4(192,114,40,0),21 }, + { IPv4(192,114,80,0),22 }, + { IPv4(192,115,4,0),22 }, + { IPv4(192,115,16,0),20 }, + { IPv4(192,115,56,0),21 }, + { IPv4(192,115,72,0),21 }, + { IPv4(192,115,128,0),21 }, + { IPv4(192,115,176,0),22 }, + { IPv4(192,115,216,0),21 }, + { IPv4(192,115,224,0),20 }, + { IPv4(192,116,64,0),18 }, + { IPv4(192,116,128,0),18 }, + { IPv4(192,117,0,0),18 }, + { IPv4(192,117,96,0),19 }, + { IPv4(192,118,20,0),22 }, + { IPv4(192,118,28,0),22 }, + { IPv4(192,118,48,0),22 }, + { IPv4(192,118,64,0),22 }, + { IPv4(192,118,128,0),22 }, + { IPv4(192,119,135,0),24 }, + { IPv4(192,120,9,0),24 }, + { IPv4(192,120,10,0),23 }, + { IPv4(192,120,12,0),22 }, + { IPv4(192,120,55,0),24 }, + { IPv4(192,120,89,0),24 }, + { IPv4(192,120,90,0),24 }, + { IPv4(192,120,91,0),24 }, + { IPv4(192,120,107,0),24 }, + { IPv4(192,120,193,0),24 }, + { IPv4(192,121,165,0),24 }, + { IPv4(192,122,171,0),24 }, + { IPv4(192,122,173,0),24 }, + { IPv4(192,122,174,0),24 }, + { IPv4(192,122,181,0),24 }, + { IPv4(192,122,212,0),24 }, + { IPv4(192,122,213,0),24 }, + { IPv4(192,122,237,0),24 }, + { IPv4(192,122,244,0),24 }, + { IPv4(192,122,250,0),24 }, + { IPv4(192,124,20,0),24 }, + { IPv4(192,124,42,0),24 }, + { IPv4(192,124,118,0),24 }, + { IPv4(192,124,153,0),24 }, + { IPv4(192,124,154,0),24 }, + { IPv4(192,124,157,0),24 }, + { IPv4(192,124,159,0),24 }, + { IPv4(192,128,3,0),24 }, + { IPv4(192,128,52,0),24 }, + { IPv4(192,128,125,0),24 }, + { IPv4(192,128,126,0),24 }, + { IPv4(192,128,133,0),24 }, + { IPv4(192,128,134,0),24 }, + { IPv4(192,128,166,0),24 }, + { IPv4(192,128,167,0),24 }, + { IPv4(192,128,252,0),24 }, + { IPv4(192,128,254,0),24 }, + { IPv4(192,129,50,0),24 }, + { IPv4(192,129,53,0),24 }, + { IPv4(192,129,55,0),24 }, + { IPv4(192,129,64,0),24 }, + { IPv4(192,129,64,0),22 }, + { IPv4(192,129,68,0),23 }, + { IPv4(192,129,85,0),24 }, + { IPv4(192,131,86,0),24 }, + { IPv4(192,131,99,0),24 }, + { IPv4(192,131,102,0),24 }, + { IPv4(192,131,121,0),24 }, + { IPv4(192,131,129,0),24 }, + { IPv4(192,131,143,0),24 }, + { IPv4(192,131,145,0),24 }, + { IPv4(192,131,155,0),24 }, + { IPv4(192,131,181,0),24 }, + { IPv4(192,131,225,0),24 }, + { IPv4(192,131,226,0),24 }, + { IPv4(192,132,16,0),22 }, + { IPv4(192,132,39,0),24 }, + { IPv4(192,132,51,0),24 }, + { IPv4(192,132,84,0),23 }, + { IPv4(192,132,100,0),24 }, + { IPv4(192,132,206,0),24 }, + { IPv4(192,132,217,0),24 }, + { IPv4(192,132,218,0),24 }, + { IPv4(192,132,222,0),24 }, + { IPv4(192,132,223,0),24 }, + { IPv4(192,132,225,0),24 }, + { IPv4(192,132,228,0),24 }, + { IPv4(192,132,245,0),24 }, + { IPv4(192,132,247,0),24 }, + { IPv4(192,133,2,0),24 }, + { IPv4(192,133,34,0),24 }, + { IPv4(192,133,43,0),24 }, + { IPv4(192,133,51,0),24 }, + { IPv4(192,133,60,0),24 }, + { IPv4(192,133,63,0),24 }, + { IPv4(192,133,84,0),24 }, + { IPv4(192,133,100,0),24 }, + { IPv4(192,133,104,0),24 }, + { IPv4(192,133,105,0),24 }, + { IPv4(192,133,124,0),24 }, + { 
IPv4(192,133,144,0),20 }, + { IPv4(192,133,160,0),19 }, + { IPv4(192,133,191,0),24 }, + { IPv4(192,133,192,0),19 }, + { IPv4(192,133,224,0),20 }, + { IPv4(192,133,240,0),22 }, + { IPv4(192,133,254,0),24 }, + { IPv4(192,135,43,0),24 }, + { IPv4(192,135,50,0),24 }, + { IPv4(192,135,76,0),24 }, + { IPv4(192,135,80,0),24 }, + { IPv4(192,135,112,0),24 }, + { IPv4(192,135,113,0),24 }, + { IPv4(192,135,114,0),24 }, + { IPv4(192,135,115,0),24 }, + { IPv4(192,135,116,0),24 }, + { IPv4(192,135,118,0),24 }, + { IPv4(192,135,119,0),24 }, + { IPv4(192,135,120,0),24 }, + { IPv4(192,135,121,0),24 }, + { IPv4(192,135,122,0),24 }, + { IPv4(192,135,144,0),24 }, + { IPv4(192,135,174,0),24 }, + { IPv4(192,135,176,0),24 }, + { IPv4(192,135,181,0),24 }, + { IPv4(192,135,183,0),24 }, + { IPv4(192,135,184,0),24 }, + { IPv4(192,135,188,0),24 }, + { IPv4(192,135,189,0),24 }, + { IPv4(192,135,193,0),24 }, + { IPv4(192,135,227,0),24 }, + { IPv4(192,135,237,0),24 }, + { IPv4(192,135,238,0),24 }, + { IPv4(192,135,239,0),24 }, + { IPv4(192,135,240,0),21 }, + { IPv4(192,135,248,0),23 }, + { IPv4(192,135,250,0),24 }, + { IPv4(192,136,8,0),24 }, + { IPv4(192,136,16,0),24 }, + { IPv4(192,136,22,0),24 }, + { IPv4(192,136,32,0),23 }, + { IPv4(192,136,50,0),24 }, + { IPv4(192,136,64,0),24 }, + { IPv4(192,136,70,0),24 }, + { IPv4(192,136,112,0),24 }, + { IPv4(192,136,120,0),21 }, + { IPv4(192,136,128,0),23 }, + { IPv4(192,136,130,0),24 }, + { IPv4(192,136,133,0),24 }, + { IPv4(192,136,154,0),23 }, + { IPv4(192,137,21,0),24 }, + { IPv4(192,137,225,0),24 }, + { IPv4(192,137,252,0),24 }, + { IPv4(192,138,24,0),21 }, + { IPv4(192,138,29,0),24 }, + { IPv4(192,138,32,0),19 }, + { IPv4(192,138,35,0),24 }, + { IPv4(192,138,64,0),20 }, + { IPv4(192,138,78,0),24 }, + { IPv4(192,138,80,0),22 }, + { IPv4(192,138,85,0),24 }, + { IPv4(192,138,87,0),24 }, + { IPv4(192,138,101,0),24 }, + { IPv4(192,138,131,0),24 }, + { IPv4(192,138,170,0),24 }, + { IPv4(192,138,172,0),24 }, + { IPv4(192,138,173,0),24 }, + { IPv4(192,138,174,0),24 }, + { IPv4(192,138,176,0),23 }, + { IPv4(192,138,178,0),24 }, + { IPv4(192,138,184,0),24 }, + { IPv4(192,138,189,0),24 }, + { IPv4(192,138,191,0),24 }, + { IPv4(192,138,253,0),24 }, + { IPv4(192,139,6,0),24 }, + { IPv4(192,139,7,0),24 }, + { IPv4(192,139,23,0),24 }, + { IPv4(192,139,37,0),24 }, + { IPv4(192,139,46,0),24 }, + { IPv4(192,139,80,0),24 }, + { IPv4(192,139,81,0),24 }, + { IPv4(192,139,82,0),24 }, + { IPv4(192,139,133,0),24 }, + { IPv4(192,139,134,0),24 }, + { IPv4(192,139,135,0),24 }, + { IPv4(192,139,136,0),24 }, + { IPv4(192,139,141,0),24 }, + { IPv4(192,139,194,0),24 }, + { IPv4(192,139,195,0),24 }, + { IPv4(192,139,219,0),24 }, + { IPv4(192,139,220,0),24 }, + { IPv4(192,139,233,0),24 }, + { IPv4(192,139,234,0),24 }, + { IPv4(192,139,235,0),24 }, + { IPv4(192,139,238,0),24 }, + { IPv4(192,146,1,0),24 }, + { IPv4(192,146,2,0),24 }, + { IPv4(192,146,3,0),24 }, + { IPv4(192,146,4,0),24 }, + { IPv4(192,146,5,0),24 }, + { IPv4(192,146,25,0),24 }, + { IPv4(192,146,26,0),24 }, + { IPv4(192,146,27,0),24 }, + { IPv4(192,146,28,0),24 }, + { IPv4(192,146,29,0),24 }, + { IPv4(192,146,30,0),24 }, + { IPv4(192,146,31,0),24 }, + { IPv4(192,146,32,0),19 }, + { IPv4(192,146,64,0),19 }, + { IPv4(192,146,96,0),22 }, + { IPv4(192,146,100,0),24 }, + { IPv4(192,146,112,0),24 }, + { IPv4(192,146,150,0),24 }, + { IPv4(192,146,159,0),24 }, + { IPv4(192,146,161,0),24 }, + { IPv4(192,146,162,0),24 }, + { IPv4(192,146,183,0),24 }, + { IPv4(192,146,201,0),24 }, + { IPv4(192,146,214,0),24 }, + { IPv4(192,146,226,0),24 }, + { 
IPv4(192,146,254,0),24 }, + { IPv4(192,147,7,0),24 }, + { IPv4(192,147,12,0),24 }, + { IPv4(192,147,13,0),24 }, + { IPv4(192,147,35,0),24 }, + { IPv4(192,147,40,0),24 }, + { IPv4(192,147,51,0),24 }, + { IPv4(192,147,73,0),24 }, + { IPv4(192,147,160,0),20 }, + { IPv4(192,147,171,0),24 }, + { IPv4(192,147,176,0),22 }, + { IPv4(192,147,223,0),24 }, + { IPv4(192,147,233,0),24 }, + { IPv4(192,147,236,0),24 }, + { IPv4(192,147,239,0),24 }, + { IPv4(192,147,240,0),24 }, + { IPv4(192,147,242,0),24 }, + { IPv4(192,147,243,0),24 }, + { IPv4(192,147,244,0),24 }, + { IPv4(192,147,249,0),24 }, + { IPv4(192,148,93,0),24 }, + { IPv4(192,148,94,0),23 }, + { IPv4(192,148,96,0),23 }, + { IPv4(192,148,174,0),24 }, + { IPv4(192,148,195,0),24 }, + { IPv4(192,148,252,0),24 }, + { IPv4(192,148,253,0),24 }, + { IPv4(192,149,2,0),24 }, + { IPv4(192,149,18,0),24 }, + { IPv4(192,149,20,0),24 }, + { IPv4(192,149,55,0),24 }, + { IPv4(192,149,81,0),24 }, + { IPv4(192,149,89,0),24 }, + { IPv4(192,149,92,0),24 }, + { IPv4(192,149,104,0),24 }, + { IPv4(192,149,107,0),24 }, + { IPv4(192,149,108,0),24 }, + { IPv4(192,149,138,0),24 }, + { IPv4(192,149,140,0),24 }, + { IPv4(192,149,141,0),24 }, + { IPv4(192,149,142,0),24 }, + { IPv4(192,149,146,0),24 }, + { IPv4(192,149,147,0),24 }, + { IPv4(192,149,148,0),24 }, + { IPv4(192,149,151,0),24 }, + { IPv4(192,149,214,0),24 }, + { IPv4(192,149,216,0),24 }, + { IPv4(192,149,217,0),24 }, + { IPv4(192,149,231,0),24 }, + { IPv4(192,149,235,0),24 }, + { IPv4(192,149,237,0),24 }, + { IPv4(192,149,240,0),24 }, + { IPv4(192,150,14,0),24 }, + { IPv4(192,150,15,0),24 }, + { IPv4(192,150,21,0),24 }, + { IPv4(192,150,27,0),24 }, + { IPv4(192,150,28,0),24 }, + { IPv4(192,150,31,0),24 }, + { IPv4(192,150,32,0),21 }, + { IPv4(192,150,87,0),24 }, + { IPv4(192,150,103,0),24 }, + { IPv4(192,150,113,0),24 }, + { IPv4(192,150,123,0),24 }, + { IPv4(192,150,175,0),24 }, + { IPv4(192,150,176,0),24 }, + { IPv4(192,150,186,0),23 }, + { IPv4(192,150,199,0),24 }, + { IPv4(192,150,210,0),24 }, + { IPv4(192,150,216,0),24 }, + { IPv4(192,150,221,0),24 }, + { IPv4(192,150,224,0),24 }, + { IPv4(192,150,242,0),24 }, + { IPv4(192,150,245,0),24 }, + { IPv4(192,150,249,0),24 }, + { IPv4(192,150,250,0),23 }, + { IPv4(192,150,253,0),24 }, + { IPv4(192,151,7,0),24 }, + { IPv4(192,151,10,0),23 }, + { IPv4(192,151,30,0),24 }, + { IPv4(192,151,34,0),24 }, + { IPv4(192,151,39,0),24 }, + { IPv4(192,151,46,0),24 }, + { IPv4(192,151,110,0),24 }, + { IPv4(192,151,112,0),24 }, + { IPv4(192,152,4,0),24 }, + { IPv4(192,152,16,0),21 }, + { IPv4(192,152,43,0),24 }, + { IPv4(192,152,54,0),24 }, + { IPv4(192,152,95,0),24 }, + { IPv4(192,152,99,0),24 }, + { IPv4(192,152,102,0),24 }, + { IPv4(192,152,106,0),24 }, + { IPv4(192,152,137,0),24 }, + { IPv4(192,152,138,0),24 }, + { IPv4(192,152,183,0),24 }, + { IPv4(192,152,212,0),24 }, + { IPv4(192,152,243,0),24 }, + { IPv4(192,152,245,0),24 }, + { IPv4(192,153,10,0),24 }, + { IPv4(192,153,11,0),24 }, + { IPv4(192,153,20,0),24 }, + { IPv4(192,153,22,0),24 }, + { IPv4(192,153,23,0),24 }, + { IPv4(192,153,24,0),24 }, + { IPv4(192,153,25,0),24 }, + { IPv4(192,153,48,0),21 }, + { IPv4(192,153,51,0),24 }, + { IPv4(192,153,92,0),24 }, + { IPv4(192,153,93,0),24 }, + { IPv4(192,153,124,0),24 }, + { IPv4(192,153,132,0),22 }, + { IPv4(192,153,136,0),21 }, + { IPv4(192,153,144,0),21 }, + { IPv4(192,153,156,0),24 }, + { IPv4(192,153,157,0),24 }, + { IPv4(192,153,159,0),24 }, + { IPv4(192,153,191,0),24 }, + { IPv4(192,153,219,0),24 }, + { IPv4(192,153,244,0),23 }, + { IPv4(192,153,245,0),24 }, + { 
IPv4(192,153,247,0),24 }, + { IPv4(192,154,57,0),24 }, + { IPv4(192,156,0,0),19 }, + { IPv4(192,156,13,0),24 }, + { IPv4(192,156,26,0),24 }, + { IPv4(192,156,27,0),24 }, + { IPv4(192,156,32,0),19 }, + { IPv4(192,156,33,0),24 }, + { IPv4(192,156,61,0),24 }, + { IPv4(192,156,63,0),24 }, + { IPv4(192,156,64,0),24 }, + { IPv4(192,156,64,0),20 }, + { IPv4(192,156,65,0),24 }, + { IPv4(192,156,66,0),24 }, + { IPv4(192,156,67,0),24 }, + { IPv4(192,156,80,0),23 }, + { IPv4(192,156,81,0),24 }, + { IPv4(192,156,84,0),24 }, + { IPv4(192,156,86,0),23 }, + { IPv4(192,156,86,0),24 }, + { IPv4(192,156,87,0),24 }, + { IPv4(192,156,88,0),24 }, + { IPv4(192,156,88,0),21 }, + { IPv4(192,156,89,0),24 }, + { IPv4(192,156,90,0),24 }, + { IPv4(192,156,91,0),24 }, + { IPv4(192,156,93,0),24 }, + { IPv4(192,156,95,0),24 }, + { IPv4(192,156,98,0),24 }, + { IPv4(192,156,101,0),24 }, + { IPv4(192,156,133,0),24 }, + { IPv4(192,156,134,0),24 }, + { IPv4(192,156,135,0),24 }, + { IPv4(192,156,136,0),24 }, + { IPv4(192,156,166,0),24 }, + { IPv4(192,156,191,0),24 }, + { IPv4(192,156,202,0),24 }, + { IPv4(192,156,212,0),24 }, + { IPv4(192,156,214,0),24 }, + { IPv4(192,156,220,0),24 }, + { IPv4(192,156,226,0),24 }, + { IPv4(192,156,234,0),24 }, + { IPv4(192,156,243,0),24 }, + { IPv4(192,157,130,0),24 }, + { IPv4(192,158,48,0),24 }, + { IPv4(192,158,61,0),24 }, + { IPv4(192,159,13,0),24 }, + { IPv4(192,159,32,0),22 }, + { IPv4(192,159,104,0),24 }, + { IPv4(192,159,111,0),24 }, + { IPv4(192,159,130,0),24 }, + { IPv4(192,160,15,0),24 }, + { IPv4(192,160,35,0),24 }, + { IPv4(192,160,49,0),24 }, + { IPv4(192,160,53,0),24 }, + { IPv4(192,160,55,0),24 }, + { IPv4(192,160,61,0),24 }, + { IPv4(192,160,62,0),24 }, + { IPv4(192,160,69,0),24 }, + { IPv4(192,160,73,0),24 }, + { IPv4(192,160,74,0),24 }, + { IPv4(192,160,97,0),24 }, + { IPv4(192,160,98,0),23 }, + { IPv4(192,160,100,0),24 }, + { IPv4(192,160,122,0),24 }, + { IPv4(192,160,125,0),24 }, + { IPv4(192,160,129,0),24 }, + { IPv4(192,160,130,0),24 }, + { IPv4(192,160,158,0),24 }, + { IPv4(192,160,159,0),24 }, + { IPv4(192,160,165,0),24 }, + { IPv4(192,160,186,0),24 }, + { IPv4(192,160,187,0),24 }, + { IPv4(192,160,242,0),24 }, + { IPv4(192,160,243,0),24 }, + { IPv4(192,160,244,0),24 }, + { IPv4(192,161,36,0),24 }, + { IPv4(192,162,16,0),24 }, + { IPv4(192,164,72,0),21 }, + { IPv4(192,164,128,0),19 }, + { IPv4(192,164,176,0),20 }, + { IPv4(192,164,192,0),20 }, + { IPv4(192,165,188,0),24 }, + { IPv4(192,165,207,0),24 }, + { IPv4(192,169,4,0),24 }, + { IPv4(192,169,5,0),24 }, + { IPv4(192,169,39,0),24 }, + { IPv4(192,169,40,0),23 }, + { IPv4(192,169,64,0),23 }, + { IPv4(192,170,0,0),18 }, + { IPv4(192,170,64,0),19 }, + { IPv4(192,170,66,0),24 }, + { IPv4(192,170,73,0),24 }, + { IPv4(192,170,79,0),24 }, + { IPv4(192,170,96,0),19 }, + { IPv4(192,171,8,0),22 }, + { IPv4(192,171,12,0),24 }, + { IPv4(192,171,16,0),23 }, + { IPv4(192,171,80,0),20 }, + { IPv4(192,171,101,0),24 }, + { IPv4(192,171,108,0),24 }, + { IPv4(192,171,111,0),24 }, + { IPv4(192,171,113,0),24 }, + { IPv4(192,172,0,0),19 }, + { IPv4(192,172,222,0),24 }, + { IPv4(192,172,226,0),24 }, + { IPv4(192,172,241,0),24 }, + { IPv4(192,174,32,0),19 }, + { IPv4(192,175,165,0),24 }, + { IPv4(192,175,173,0),24 }, + { IPv4(192,175,182,0),23 }, + { IPv4(192,175,185,0),24 }, + { IPv4(192,175,198,0),24 }, + { IPv4(192,175,209,0),24 }, + { IPv4(192,175,253,0),24 }, + { IPv4(192,176,253,0),24 }, + { IPv4(192,187,4,0),24 }, + { IPv4(192,187,4,0),22 }, + { IPv4(192,187,128,0),17 }, + { IPv4(192,187,156,0),24 }, + { IPv4(192,187,206,0),24 }, + 
{ IPv4(192,188,3,0),24 }, + { IPv4(192,188,4,0),24 }, + { IPv4(192,188,16,0),24 }, + { IPv4(192,188,17,0),24 }, + { IPv4(192,188,34,0),24 }, + { IPv4(192,188,35,0),24 }, + { IPv4(192,188,53,0),24 }, + { IPv4(192,188,57,0),24 }, + { IPv4(192,188,60,0),24 }, + { IPv4(192,188,70,0),24 }, + { IPv4(192,188,72,0),24 }, + { IPv4(192,188,89,0),24 }, + { IPv4(192,188,90,0),24 }, + { IPv4(192,188,96,0),24 }, + { IPv4(192,188,106,0),24 }, + { IPv4(192,188,107,0),24 }, + { IPv4(192,188,114,0),24 }, + { IPv4(192,188,136,0),24 }, + { IPv4(192,188,148,0),24 }, + { IPv4(192,188,149,0),24 }, + { IPv4(192,188,159,0),24 }, + { IPv4(192,188,193,0),24 }, + { IPv4(192,188,199,0),24 }, + { IPv4(192,188,202,0),24 }, + { IPv4(192,188,204,0),22 }, + { IPv4(192,188,208,0),20 }, + { IPv4(192,188,230,0),24 }, + { IPv4(192,188,231,0),24 }, + { IPv4(192,188,232,0),24 }, + { IPv4(192,188,238,0),23 }, + { IPv4(192,188,240,0),24 }, + { IPv4(192,188,253,0),24 }, + { IPv4(192,189,32,0),24 }, + { IPv4(192,189,44,0),24 }, + { IPv4(192,189,45,0),24 }, + { IPv4(192,189,46,0),24 }, + { IPv4(192,189,47,0),24 }, + { IPv4(192,189,48,0),24 }, + { IPv4(192,189,54,0),24 }, + { IPv4(192,189,62,0),24 }, + { IPv4(192,189,65,0),24 }, + { IPv4(192,189,74,0),24 }, + { IPv4(192,189,172,0),24 }, + { IPv4(192,189,174,0),24 }, + { IPv4(192,189,177,0),24 }, + { IPv4(192,189,184,0),22 }, + { IPv4(192,189,197,0),24 }, + { IPv4(192,189,199,0),24 }, + { IPv4(192,189,218,0),24 }, + { IPv4(192,189,226,0),24 }, + { IPv4(192,189,227,0),24 }, + { IPv4(192,189,247,0),24 }, + { IPv4(192,189,249,0),24 }, + { IPv4(192,190,12,0),24 }, + { IPv4(192,190,37,0),24 }, + { IPv4(192,190,38,0),24 }, + { IPv4(192,190,45,0),24 }, + { IPv4(192,190,60,0),24 }, + { IPv4(192,190,66,0),24 }, + { IPv4(192,190,68,0),24 }, + { IPv4(192,190,106,0),24 }, + { IPv4(192,190,109,0),24 }, + { IPv4(192,190,111,0),24 }, + { IPv4(192,190,224,0),24 }, + { IPv4(192,192,0,0),24 }, + { IPv4(192,192,0,0),16 }, + { IPv4(192,192,1,0),24 }, + { IPv4(192,192,2,0),24 }, + { IPv4(192,192,15,0),24 }, + { IPv4(192,193,44,0),24 }, + { IPv4(192,193,45,0),24 }, + { IPv4(192,193,48,0),24 }, + { IPv4(192,193,70,0),24 }, + { IPv4(192,193,74,0),24 }, + { IPv4(192,193,75,0),24 }, + { IPv4(192,193,76,0),24 }, + { IPv4(192,193,78,0),24 }, + { IPv4(192,193,79,0),24 }, + { IPv4(192,193,85,0),24 }, + { IPv4(192,193,126,0),24 }, + { IPv4(192,193,127,0),24 }, + { IPv4(192,193,192,0),24 }, + { IPv4(192,193,193,0),24 }, + { IPv4(192,193,195,0),24 }, + { IPv4(192,193,196,0),24 }, + { IPv4(192,193,208,0),24 }, + { IPv4(192,193,210,0),24 }, + { IPv4(192,193,211,0),24 }, + { IPv4(192,195,26,0),24 }, + { IPv4(192,195,30,0),24 }, + { IPv4(192,195,38,0),24 }, + { IPv4(192,195,41,0),24 }, + { IPv4(192,195,44,0),24 }, + { IPv4(192,195,49,0),24 }, + { IPv4(192,195,50,0),24 }, + { IPv4(192,195,68,0),23 }, + { IPv4(192,195,70,0),24 }, + { IPv4(192,195,85,0),24 }, + { IPv4(192,195,153,0),24 }, + { IPv4(192,195,154,0),23 }, + { IPv4(192,195,176,0),24 }, + { IPv4(192,195,177,0),24 }, + { IPv4(192,195,190,0),24 }, + { IPv4(192,195,192,0),22 }, + { IPv4(192,195,196,0),24 }, + { IPv4(192,195,243,0),24 }, + { IPv4(192,195,245,0),24 }, + { IPv4(192,197,0,0),19 }, + { IPv4(192,197,48,0),23 }, + { IPv4(192,197,50,0),24 }, + { IPv4(192,197,67,0),24 }, + { IPv4(192,197,69,0),24 }, + { IPv4(192,197,72,0),24 }, + { IPv4(192,197,76,0),24 }, + { IPv4(192,197,77,0),24 }, + { IPv4(192,197,78,0),24 }, + { IPv4(192,197,79,0),24 }, + { IPv4(192,197,82,0),24 }, + { IPv4(192,197,83,0),24 }, + { IPv4(192,197,111,0),24 }, + { IPv4(192,197,114,0),24 }, + 
{ IPv4(192,197,115,0),24 }, + { IPv4(192,197,166,0),24 }, + { IPv4(192,197,178,0),24 }, + { IPv4(192,197,180,0),24 }, + { IPv4(192,197,181,0),24 }, + { IPv4(192,197,182,0),24 }, + { IPv4(192,197,183,0),24 }, + { IPv4(192,197,184,0),24 }, + { IPv4(192,197,186,0),24 }, + { IPv4(192,197,191,0),24 }, + { IPv4(192,197,212,0),23 }, + { IPv4(192,197,214,0),24 }, + { IPv4(192,197,243,0),24 }, + { IPv4(192,197,244,0),24 }, + { IPv4(192,197,253,0),24 }, + { IPv4(192,198,148,0),24 }, + { IPv4(192,200,2,0),24 }, + { IPv4(192,200,3,0),24 }, + { IPv4(192,200,4,0),24 }, + { IPv4(192,200,5,0),24 }, + { IPv4(192,200,6,0),24 }, + { IPv4(192,200,7,0),24 }, + { IPv4(192,203,40,0),24 }, + { IPv4(192,203,41,0),24 }, + { IPv4(192,203,43,0),24 }, + { IPv4(192,203,48,0),24 }, + { IPv4(192,203,48,0),22 }, + { IPv4(192,203,49,0),24 }, + { IPv4(192,203,50,0),24 }, + { IPv4(192,203,51,0),24 }, + { IPv4(192,203,106,0),24 }, + { IPv4(192,203,130,0),23 }, + { IPv4(192,203,132,0),24 }, + { IPv4(192,203,136,0),23 }, + { IPv4(192,203,138,0),24 }, + { IPv4(192,203,139,0),24 }, + { IPv4(192,203,140,0),22 }, + { IPv4(192,203,144,0),24 }, + { IPv4(192,203,167,0),24 }, + { IPv4(192,203,174,0),24 }, + { IPv4(192,203,178,0),24 }, + { IPv4(192,203,180,0),24 }, + { IPv4(192,203,188,0),24 }, + { IPv4(192,203,190,0),24 }, + { IPv4(192,203,191,0),24 }, + { IPv4(192,203,196,0),24 }, + { IPv4(192,203,201,0),24 }, + { IPv4(192,203,204,0),24 }, + { IPv4(192,203,206,0),24 }, + { IPv4(192,203,212,0),24 }, + { IPv4(192,203,214,0),23 }, + { IPv4(192,203,230,0),24 }, + { IPv4(192,203,247,0),24 }, + { IPv4(192,203,249,0),24 }, + { IPv4(192,204,0,0),16 }, + { IPv4(192,204,160,0),21 }, + { IPv4(192,205,31,0),24 }, + { IPv4(192,205,32,0),22 }, + { IPv4(192,205,36,0),23 }, + { IPv4(192,206,21,0),24 }, + { IPv4(192,206,50,0),24 }, + { IPv4(192,206,101,0),24 }, + { IPv4(192,206,177,0),24 }, + { IPv4(192,206,180,0),24 }, + { IPv4(192,206,185,0),24 }, + { IPv4(192,206,217,0),24 }, + { IPv4(192,206,218,0),24 }, + { IPv4(192,206,235,0),24 }, + { IPv4(192,207,13,0),24 }, + { IPv4(192,207,20,0),24 }, + { IPv4(192,207,36,0),24 }, + { IPv4(192,207,63,0),24 }, + { IPv4(192,207,69,0),24 }, + { IPv4(192,207,72,0),24 }, + { IPv4(192,207,74,0),24 }, + { IPv4(192,207,119,0),24 }, + { IPv4(192,207,133,0),24 }, + { IPv4(192,207,159,0),24 }, + { IPv4(192,207,163,0),24 }, + { IPv4(192,207,169,0),24 }, + { IPv4(192,207,179,0),24 }, + { IPv4(192,207,181,0),24 }, + { IPv4(192,207,184,0),24 }, + { IPv4(192,207,187,0),24 }, + { IPv4(192,207,207,0),24 }, + { IPv4(192,207,208,0),22 }, + { IPv4(192,207,209,0),24 }, + { IPv4(192,207,210,0),23 }, + { IPv4(192,207,212,0),24 }, + { IPv4(192,207,223,0),24 }, + { IPv4(192,207,225,0),24 }, + { IPv4(192,207,228,0),22 }, + { IPv4(192,207,233,0),24 }, + { IPv4(192,207,235,0),24 }, + { IPv4(192,208,16,0),24 }, + { IPv4(192,208,17,0),24 }, + { IPv4(192,208,18,0),24 }, + { IPv4(192,208,19,0),24 }, + { IPv4(192,208,20,0),24 }, + { IPv4(192,208,21,0),24 }, + { IPv4(192,208,22,0),24 }, + { IPv4(192,208,23,0),24 }, + { IPv4(192,208,24,0),24 }, + { IPv4(192,208,25,0),24 }, + { IPv4(192,208,26,0),24 }, + { IPv4(192,208,27,0),24 }, + { IPv4(192,208,28,0),24 }, + { IPv4(192,208,29,0),24 }, + { IPv4(192,208,30,0),23 }, + { IPv4(192,208,35,0),24 }, + { IPv4(192,208,38,0),24 }, + { IPv4(192,208,40,0),24 }, + { IPv4(192,209,117,0),24 }, + { IPv4(192,210,98,0),24 }, + { IPv4(192,211,64,0),24 }, + { IPv4(192,211,64,0),19 }, + { IPv4(192,211,66,0),24 }, + { IPv4(192,211,67,0),24 }, + { IPv4(192,211,71,0),24 }, + { IPv4(192,211,72,0),24 }, + { 
IPv4(192,211,75,0),24 }, + { IPv4(192,211,76,0),22 }, + { IPv4(192,211,80,0),22 }, + { IPv4(192,211,84,0),23 }, + { IPv4(192,211,88,0),24 }, + { IPv4(192,211,94,0),23 }, + { IPv4(192,211,96,0),23 }, + { IPv4(192,211,96,0),20 }, + { IPv4(192,211,102,0),23 }, + { IPv4(192,211,103,0),24 }, + { IPv4(192,211,105,0),24 }, + { IPv4(192,211,107,0),24 }, + { IPv4(192,211,110,0),23 }, + { IPv4(192,211,112,0),21 }, + { IPv4(192,211,112,0),23 }, + { IPv4(192,211,114,0),24 }, + { IPv4(192,211,116,0),24 }, + { IPv4(192,211,117,0),24 }, + { IPv4(192,211,118,0),24 }, + { IPv4(192,211,120,0),24 }, + { IPv4(192,211,120,0),22 }, + { IPv4(192,211,121,0),24 }, + { IPv4(192,211,122,0),24 }, + { IPv4(192,215,0,0),16 }, + { IPv4(192,215,1,0),24 }, + { IPv4(192,215,3,0),24 }, + { IPv4(192,215,4,0),24 }, + { IPv4(192,215,8,0),23 }, + { IPv4(192,215,11,0),24 }, + { IPv4(192,215,14,0),24 }, + { IPv4(192,215,16,0),23 }, + { IPv4(192,215,21,0),24 }, + { IPv4(192,215,22,0),23 }, + { IPv4(192,215,26,0),24 }, + { IPv4(192,215,32,0),24 }, + { IPv4(192,215,36,0),24 }, + { IPv4(192,215,48,0),24 }, + { IPv4(192,215,50,0),23 }, + { IPv4(192,215,58,0),24 }, + { IPv4(192,215,64,0),23 }, + { IPv4(192,215,70,0),23 }, + { IPv4(192,215,72,0),22 }, + { IPv4(192,215,78,0),23 }, + { IPv4(192,215,81,0),24 }, + { IPv4(192,215,101,0),24 }, + { IPv4(192,215,102,0),24 }, + { IPv4(192,215,103,0),24 }, + { IPv4(192,215,107,0),24 }, + { IPv4(192,215,120,0),24 }, + { IPv4(192,215,122,0),24 }, + { IPv4(192,215,123,0),24 }, + { IPv4(192,215,124,0),24 }, + { IPv4(192,215,140,0),22 }, + { IPv4(192,215,145,0),24 }, + { IPv4(192,215,146,0),23 }, + { IPv4(192,215,150,0),24 }, + { IPv4(192,215,160,0),23 }, + { IPv4(192,215,162,0),24 }, + { IPv4(192,215,164,0),24 }, + { IPv4(192,215,168,0),24 }, + { IPv4(192,215,169,0),24 }, + { IPv4(192,215,170,0),24 }, + { IPv4(192,215,171,0),24 }, + { IPv4(192,215,175,0),24 }, + { IPv4(192,215,176,0),24 }, + { IPv4(192,215,180,0),23 }, + { IPv4(192,215,184,0),24 }, + { IPv4(192,215,185,0),24 }, + { IPv4(192,215,191,0),24 }, + { IPv4(192,215,194,0),24 }, + { IPv4(192,215,198,0),24 }, + { IPv4(192,215,200,0),21 }, + { IPv4(192,215,212,0),24 }, + { IPv4(192,215,213,0),24 }, + { IPv4(192,215,214,0),24 }, + { IPv4(192,215,215,0),24 }, + { IPv4(192,215,216,0),23 }, + { IPv4(192,215,220,0),24 }, + { IPv4(192,215,234,0),23 }, + { IPv4(192,215,241,0),24 }, + { IPv4(192,215,248,0),24 }, + { IPv4(192,215,249,0),24 }, + { IPv4(192,215,254,0),24 }, + { IPv4(192,216,8,0),24 }, + { IPv4(192,216,44,0),24 }, + { IPv4(192,216,45,0),24 }, + { IPv4(192,216,56,0),24 }, + { IPv4(192,216,57,0),24 }, + { IPv4(192,216,61,0),24 }, + { IPv4(192,216,72,0),24 }, + { IPv4(192,216,73,0),24 }, + { IPv4(192,216,74,0),24 }, + { IPv4(192,216,79,0),24 }, + { IPv4(192,216,89,0),24 }, + { IPv4(192,216,93,0),24 }, + { IPv4(192,216,95,0),24 }, + { IPv4(192,216,139,0),24 }, + { IPv4(192,216,144,0),21 }, + { IPv4(192,216,186,0),24 }, + { IPv4(192,216,242,0),24 }, + { IPv4(192,217,0,0),16 }, + { IPv4(192,218,8,0),23 }, + { IPv4(192,218,10,0),23 }, + { IPv4(192,218,12,0),23 }, + { IPv4(192,218,14,0),24 }, + { IPv4(192,218,15,0),24 }, + { IPv4(192,218,128,0),23 }, + { IPv4(192,218,140,0),24 }, + { IPv4(192,218,151,0),24 }, + { IPv4(192,219,150,0),24 }, + { IPv4(192,220,0,0),16 }, + { IPv4(192,222,1,0),24 }, + { IPv4(192,222,2,0),23 }, + { IPv4(192,222,4,0),22 }, + { IPv4(192,222,8,0),24 }, + { IPv4(192,222,64,0),23 }, + { IPv4(192,222,64,0),19 }, + { IPv4(192,222,66,0),24 }, + { IPv4(192,222,67,0),24 }, + { IPv4(192,222,69,0),24 }, + { IPv4(192,222,70,0),24 }, 
+ { IPv4(192,222,71,0),24 }, + { IPv4(192,222,72,0),22 }, + { IPv4(192,222,78,0),23 }, + { IPv4(192,222,80,0),24 }, + { IPv4(192,222,82,0),23 }, + { IPv4(192,222,90,0),24 }, + { IPv4(192,222,93,0),24 }, + { IPv4(192,222,94,0),24 }, + { IPv4(192,222,96,0),22 }, + { IPv4(192,222,99,0),24 }, + { IPv4(192,223,4,0),24 }, + { IPv4(192,223,6,0),24 }, + { IPv4(192,223,7,0),24 }, + { IPv4(192,223,35,0),24 }, + { IPv4(192,223,36,0),24 }, + { IPv4(192,223,37,0),24 }, + { IPv4(192,223,57,0),24 }, + { IPv4(192,223,154,0),24 }, + { IPv4(192,223,160,0),24 }, + { IPv4(192,223,161,0),24 }, + { IPv4(192,223,163,0),24 }, + { IPv4(192,223,169,0),24 }, + { IPv4(192,223,172,0),24 }, + { IPv4(192,223,174,0),24 }, + { IPv4(192,223,176,0),21 }, + { IPv4(192,223,184,0),21 }, + { IPv4(192,223,192,0),21 }, + { IPv4(192,223,200,0),24 }, + { IPv4(192,223,203,0),24 }, + { IPv4(192,223,204,0),24 }, + { IPv4(192,223,206,0),24 }, + { IPv4(192,223,207,0),24 }, + { IPv4(192,223,208,0),21 }, + { IPv4(192,223,219,0),24 }, + { IPv4(192,223,221,0),24 }, + { IPv4(192,223,222,0),24 }, + { IPv4(192,223,223,0),24 }, + { IPv4(192,223,225,0),24 }, + { IPv4(192,223,226,0),24 }, + { IPv4(192,223,227,0),24 }, + { IPv4(192,223,228,0),24 }, + { IPv4(192,223,235,0),24 }, + { IPv4(192,223,237,0),24 }, + { IPv4(192,223,241,0),24 }, + { IPv4(192,223,242,0),24 }, + { IPv4(192,223,243,0),24 }, + { IPv4(192,223,246,0),24 }, + { IPv4(192,223,248,0),21 }, + { IPv4(192,224,11,0),24 }, + { IPv4(192,225,32,0),20 }, + { IPv4(192,225,48,0),21 }, + { IPv4(192,225,56,0),24 }, + { IPv4(192,225,64,0),19 }, + { IPv4(192,227,1,0),24 }, + { IPv4(192,227,2,0),23 }, + { IPv4(192,227,4,0),22 }, + { IPv4(192,227,8,0),21 }, + { IPv4(192,228,128,0),17 }, + { IPv4(192,228,128,0),18 }, + { IPv4(192,228,192,0),19 }, + { IPv4(192,228,224,0),19 }, + { IPv4(192,229,42,0),24 }, + { IPv4(192,231,6,0),24 }, + { IPv4(192,231,31,0),24 }, + { IPv4(192,231,43,0),24 }, + { IPv4(192,231,63,0),24 }, + { IPv4(192,231,86,0),24 }, + { IPv4(192,231,90,0),24 }, + { IPv4(192,231,110,0),24 }, + { IPv4(192,231,128,0),24 }, + { IPv4(192,231,135,0),24 }, + { IPv4(192,231,139,0),24 }, + { IPv4(192,231,156,0),22 }, + { IPv4(192,231,160,0),24 }, + { IPv4(192,231,162,0),23 }, + { IPv4(192,231,164,0),24 }, + { IPv4(192,231,172,0),24 }, + { IPv4(192,231,193,0),24 }, + { IPv4(192,231,202,0),24 }, + { IPv4(192,231,214,0),24 }, + { IPv4(192,231,221,0),24 }, + { IPv4(192,231,231,0),24 }, + { IPv4(192,232,95,0),24 }, + { IPv4(192,232,117,0),24 }, + { IPv4(192,232,118,0),24 }, + { IPv4(192,232,119,0),24 }, + { IPv4(192,232,120,0),21 }, + { IPv4(192,232,120,0),24 }, + { IPv4(192,232,121,0),24 }, + { IPv4(192,233,80,0),24 }, + { IPv4(192,233,81,0),24 }, + { IPv4(192,234,14,0),23 }, + { IPv4(192,234,16,0),24 }, + { IPv4(192,234,17,0),24 }, + { IPv4(192,234,18,0),23 }, + { IPv4(192,234,20,0),24 }, + { IPv4(192,234,65,0),24 }, + { IPv4(192,234,72,0),24 }, + { IPv4(192,234,96,0),24 }, + { IPv4(192,234,101,0),24 }, + { IPv4(192,234,135,0),24 }, + { IPv4(192,234,136,0),24 }, + { IPv4(192,234,137,0),24 }, + { IPv4(192,234,140,0),24 }, + { IPv4(192,234,153,0),24 }, + { IPv4(192,234,167,0),24 }, + { IPv4(192,234,173,0),24 }, + { IPv4(192,234,175,0),24 }, + { IPv4(192,234,176,0),24 }, + { IPv4(192,234,177,0),24 }, + { IPv4(192,234,223,0),24 }, + { IPv4(192,234,235,0),24 }, + { IPv4(192,234,237,0),24 }, + { IPv4(192,234,247,0),24 }, + { IPv4(192,234,253,0),24 }, + { IPv4(192,235,0,0),20 }, + { IPv4(192,235,16,0),20 }, + { IPv4(192,237,0,0),19 }, + { IPv4(192,237,29,0),24 }, + { IPv4(192,237,32,0),19 }, + { 
IPv4(192,237,114,0),24 }, + { IPv4(192,237,115,0),24 }, + { IPv4(192,237,125,0),24 }, + { IPv4(192,239,13,0),24 }, + { IPv4(192,239,39,0),24 }, + { IPv4(192,239,48,0),24 }, + { IPv4(192,240,128,0),20 }, + { IPv4(192,240,135,0),24 }, + { IPv4(192,241,47,0),24 }, + { IPv4(192,243,0,0),20 }, + { IPv4(192,243,16,0),21 }, + { IPv4(192,243,173,0),24 }, + { IPv4(192,244,4,0),24 }, + { IPv4(192,244,8,0),21 }, + { IPv4(192,244,24,0),23 }, + { IPv4(192,244,75,0),24 }, + { IPv4(192,244,231,0),24 }, + { IPv4(192,244,247,0),24 }, + { IPv4(192,244,253,0),24 }, + { IPv4(192,245,19,0),24 }, + { IPv4(192,245,20,0),22 }, + { IPv4(192,245,25,0),24 }, + { IPv4(192,245,26,0),24 }, + { IPv4(192,245,27,0),24 }, + { IPv4(192,245,28,0),24 }, + { IPv4(192,245,29,0),24 }, + { IPv4(192,245,33,0),24 }, + { IPv4(192,245,36,0),24 }, + { IPv4(192,245,42,0),23 }, + { IPv4(192,245,58,0),23 }, + { IPv4(192,245,61,0),24 }, + { IPv4(192,245,81,0),24 }, + { IPv4(192,245,82,0),23 }, + { IPv4(192,245,84,0),23 }, + { IPv4(192,245,86,0),24 }, + { IPv4(192,245,88,0),24 }, + { IPv4(192,245,89,0),24 }, + { IPv4(192,245,90,0),24 }, + { IPv4(192,245,92,0),24 }, + { IPv4(192,245,95,0),24 }, + { IPv4(192,245,98,0),24 }, + { IPv4(192,245,119,0),24 }, + { IPv4(192,245,142,0),24 }, + { IPv4(192,245,153,0),24 }, + { IPv4(192,245,163,0),24 }, + { IPv4(192,245,171,0),24 }, + { IPv4(192,245,176,0),24 }, + { IPv4(192,245,179,0),24 }, + { IPv4(192,245,187,0),24 }, + { IPv4(192,245,197,0),24 }, + { IPv4(192,245,198,0),23 }, + { IPv4(192,245,200,0),21 }, + { IPv4(192,245,218,0),24 }, + { IPv4(192,245,232,0),24 }, + { IPv4(192,245,249,0),24 }, + { IPv4(192,245,250,0),24 }, + { IPv4(192,246,9,0),24 }, + { IPv4(192,246,17,0),24 }, + { IPv4(192,246,34,0),24 }, + { IPv4(192,246,69,0),24 }, + { IPv4(192,246,76,0),24 }, + { IPv4(192,246,84,0),24 }, + { IPv4(192,246,85,0),24 }, + { IPv4(192,246,88,0),24 }, + { IPv4(192,246,103,0),24 }, + { IPv4(192,246,117,0),24 }, + { IPv4(192,246,123,0),24 }, + { IPv4(192,246,150,0),24 }, + { IPv4(192,246,155,0),24 }, + { IPv4(192,246,171,0),24 }, + { IPv4(192,246,172,0),24 }, + { IPv4(192,246,218,0),24 }, + { IPv4(192,246,224,0),22 }, + { IPv4(192,246,228,0),23 }, + { IPv4(192,246,230,0),24 }, + { IPv4(192,246,231,0),24 }, + { IPv4(192,246,232,0),22 }, + { IPv4(192,247,16,0),20 }, + { IPv4(192,248,0,0),17 }, + { IPv4(192,249,24,0),24 }, + { IPv4(192,249,46,0),24 }, + { IPv4(192,249,47,0),24 }, + { IPv4(192,249,48,0),24 }, + { IPv4(192,249,49,0),24 }, + { IPv4(192,250,0,0),20 }, + { IPv4(192,250,112,0),24 }, + { IPv4(192,251,6,0),24 }, + { IPv4(192,251,7,0),24 }, + { IPv4(192,251,14,0),24 }, + { IPv4(192,251,26,0),24 }, + { IPv4(192,251,27,0),24 }, + { IPv4(192,251,28,0),24 }, + { IPv4(192,251,29,0),24 }, + { IPv4(192,251,30,0),24 }, + { IPv4(192,251,46,0),23 }, + { IPv4(192,251,66,0),23 }, + { IPv4(192,251,68,0),23 }, + { IPv4(192,251,94,0),24 }, + { IPv4(192,251,147,0),24 }, + { IPv4(192,251,193,0),24 }, + { IPv4(192,251,195,0),24 }, + { IPv4(192,251,213,0),24 }, + { IPv4(192,251,219,0),24 }, + { IPv4(192,251,220,0),22 }, + { IPv4(192,251,224,0),24 }, + { IPv4(192,252,0,0),21 }, + { IPv4(192,252,64,0),18 }, + { IPv4(192,252,76,0),24 }, + { IPv4(193,0,14,0),24 }, + { IPv4(193,0,224,0),22 }, + { IPv4(193,3,128,0),23 }, + { IPv4(193,5,2,0),24 }, + { IPv4(193,5,24,0),24 }, + { IPv4(193,5,25,0),24 }, + { IPv4(193,5,41,0),24 }, + { IPv4(193,5,68,0),23 }, + { IPv4(193,5,160,0),21 }, + { IPv4(193,5,240,0),21 }, + { IPv4(193,5,248,0),23 }, + { IPv4(193,5,255,0),24 }, + { IPv4(193,8,35,0),24 }, + { IPv4(193,8,40,0),23 }, + { 
IPv4(193,8,109,0),24 }, + { IPv4(193,8,197,0),24 }, + { IPv4(193,9,120,0),24 }, + { IPv4(193,9,124,0),22 }, + { IPv4(193,9,254,0),24 }, + { IPv4(193,16,48,0),20 }, + { IPv4(193,18,249,0),24 }, + { IPv4(193,22,100,0),23 }, + { IPv4(193,22,120,0),21 }, + { IPv4(193,23,134,0),24 }, + { IPv4(193,23,148,0),22 }, + { IPv4(193,23,164,0),24 }, + { IPv4(193,23,167,0),24 }, + { IPv4(193,24,16,0),21 }, + { IPv4(193,24,48,0),20 }, + { IPv4(193,24,64,0),23 }, + { IPv4(193,24,65,0),24 }, + { IPv4(193,24,66,0),24 }, + { IPv4(193,28,5,0),24 }, + { IPv4(193,28,62,0),24 }, + { IPv4(193,28,212,0),24 }, + { IPv4(193,29,230,0),24 }, + { IPv4(193,30,20,0),24 }, + { IPv4(193,30,28,0),22 }, + { IPv4(193,30,202,0),24 }, + { IPv4(193,32,17,0),24 }, + { IPv4(193,32,23,0),24 }, + { IPv4(193,32,98,0),23 }, + { IPv4(193,32,114,0),24 }, + { IPv4(193,32,208,0),23 }, + { IPv4(193,32,254,0),24 }, + { IPv4(193,34,230,0),23 }, + { IPv4(193,34,230,0),24 }, + { IPv4(193,35,182,0),23 }, + { IPv4(193,35,184,0),21 }, + { IPv4(193,35,192,0),22 }, + { IPv4(193,35,196,0),23 }, + { IPv4(193,35,255,0),24 }, + { IPv4(193,36,232,0),24 }, + { IPv4(193,37,32,0),24 }, + { IPv4(193,37,36,0),24 }, + { IPv4(193,37,69,0),24 }, + { IPv4(193,37,160,0),24 }, + { IPv4(193,38,52,0),24 }, + { IPv4(193,38,64,0),18 }, + { IPv4(193,38,168,0),24 }, + { IPv4(193,38,169,0),24 }, + { IPv4(193,39,16,0),20 }, + { IPv4(193,39,32,0),19 }, + { IPv4(193,39,64,0),23 }, + { IPv4(193,39,122,0),24 }, + { IPv4(193,39,133,0),24 }, + { IPv4(193,39,144,0),24 }, + { IPv4(193,39,246,0),24 }, + { IPv4(193,41,2,0),23 }, + { IPv4(193,41,10,0),23 }, + { IPv4(193,41,36,0),24 }, + { IPv4(193,41,56,0),22 }, + { IPv4(193,41,90,0),24 }, + { IPv4(193,41,93,0),24 }, + { IPv4(193,41,118,0),23 }, + { IPv4(193,41,128,0),22 }, + { IPv4(193,41,148,0),23 }, + { IPv4(193,41,164,0),23 }, + { IPv4(193,41,184,0),22 }, + { IPv4(193,42,128,0),22 }, + { IPv4(193,43,15,0),24 }, + { IPv4(193,46,135,0),24 }, + { IPv4(193,47,104,0),21 }, + { IPv4(193,47,112,0),20 }, + { IPv4(193,47,128,0),21 }, + { IPv4(193,53,23,0),24 }, + { IPv4(193,53,80,0),24 }, + { IPv4(193,56,127,0),24 }, + { IPv4(193,57,105,0),24 }, + { IPv4(193,57,106,0),24 }, + { IPv4(193,57,107,0),24 }, + { IPv4(193,57,109,0),24 }, + { IPv4(193,57,110,0),24 }, + { IPv4(193,58,70,0),24 }, + { IPv4(193,58,70,0),23 }, + { IPv4(193,58,71,0),24 }, + { IPv4(193,58,204,0),22 }, + { IPv4(193,58,208,0),24 }, + { IPv4(193,58,209,0),24 }, + { IPv4(193,61,112,0),22 }, + { IPv4(193,73,62,0),24 }, + { IPv4(193,73,73,0),24 }, + { IPv4(193,73,74,0),24 }, + { IPv4(193,73,75,0),24 }, + { IPv4(193,73,76,0),24 }, + { IPv4(193,73,78,0),24 }, + { IPv4(193,73,79,0),24 }, + { IPv4(193,73,80,0),24 }, + { IPv4(193,73,81,0),24 }, + { IPv4(193,73,82,0),24 }, + { IPv4(193,73,83,0),24 }, + { IPv4(193,73,84,0),24 }, + { IPv4(193,73,85,0),24 }, + { IPv4(193,73,86,0),24 }, + { IPv4(193,73,87,0),24 }, + { IPv4(193,73,88,0),24 }, + { IPv4(193,73,89,0),24 }, + { IPv4(193,73,90,0),24 }, + { IPv4(193,73,91,0),24 }, + { IPv4(193,73,92,0),24 }, + { IPv4(193,73,93,0),24 }, + { IPv4(193,73,94,0),24 }, + { IPv4(193,73,95,0),24 }, + { IPv4(193,73,96,0),24 }, + { IPv4(193,73,97,0),24 }, + { IPv4(193,73,98,0),24 }, + { IPv4(193,73,99,0),24 }, + { IPv4(193,73,100,0),24 }, + { IPv4(193,73,101,0),24 }, + { IPv4(193,73,102,0),24 }, + { IPv4(193,73,103,0),24 }, + { IPv4(193,82,158,0),24 }, + { IPv4(193,83,209,0),24 }, + { IPv4(193,83,212,0),24 }, + { IPv4(193,92,46,0),24 }, + { IPv4(193,96,28,0),24 }, + { IPv4(193,96,112,0),21 }, + { IPv4(193,96,173,0),24 }, + { IPv4(193,96,230,0),24 }, + 
{ IPv4(193,97,96,0),20 }, + { IPv4(193,97,120,0),22 }, + { IPv4(193,97,124,0),23 }, + { IPv4(193,97,129,0),24 }, + { IPv4(193,97,184,0),24 }, + { IPv4(193,98,1,0),24 }, + { IPv4(193,98,110,0),24 }, + { IPv4(193,99,144,0),24 }, + { IPv4(193,99,145,0),24 }, + { IPv4(193,100,32,0),19 }, + { IPv4(193,100,232,0),24 }, + { IPv4(193,101,58,0),24 }, + { IPv4(193,101,67,0),24 }, + { IPv4(193,102,208,0),24 }, + { IPv4(193,102,227,0),24 }, + { IPv4(193,103,1,0),24 }, + { IPv4(193,103,2,0),23 }, + { IPv4(193,103,4,0),22 }, + { IPv4(193,103,8,0),21 }, + { IPv4(193,103,16,0),20 }, + { IPv4(193,103,32,0),19 }, + { IPv4(193,103,64,0),18 }, + { IPv4(193,108,42,0),23 }, + { IPv4(193,108,64,0),21 }, + { IPv4(193,108,91,0),24 }, + { IPv4(193,108,92,0),24 }, + { IPv4(193,108,100,0),24 }, + { IPv4(193,108,132,0),23 }, + { IPv4(193,108,148,0),22 }, + { IPv4(193,108,165,0),24 }, + { IPv4(193,108,210,0),24 }, + { IPv4(193,108,214,0),24 }, + { IPv4(193,108,232,0),23 }, + { IPv4(193,108,238,0),23 }, + { IPv4(193,108,252,0),24 }, + { IPv4(193,108,253,0),24 }, + { IPv4(193,108,254,0),24 }, + { IPv4(193,108,255,0),24 }, + { IPv4(193,109,81,0),24 }, + { IPv4(193,109,108,0),22 }, + { IPv4(193,109,116,0),24 }, + { IPv4(193,109,122,0),24 }, + { IPv4(193,109,138,0),23 }, + { IPv4(193,109,142,0),23 }, + { IPv4(193,109,215,0),24 }, + { IPv4(193,113,0,0),16 }, + { IPv4(193,113,22,0),23 }, + { IPv4(193,114,118,0),24 }, + { IPv4(193,114,233,0),24 }, + { IPv4(193,114,248,0),24 }, + { IPv4(193,117,72,0),21 }, + { IPv4(193,117,190,0),24 }, + { IPv4(193,118,16,0),20 }, + { IPv4(193,119,176,0),20 }, + { IPv4(193,122,136,4),30 }, + { IPv4(193,123,112,0),20 }, + { IPv4(193,125,78,0),23 }, + { IPv4(193,128,184,0),22 }, + { IPv4(193,131,100,0),22 }, + { IPv4(193,131,114,0),23 }, + { IPv4(193,131,127,0),24 }, + { IPv4(193,132,4,0),22 }, + { IPv4(193,132,203,0),24 }, + { IPv4(193,134,254,0),24 }, + { IPv4(193,135,104,0),23 }, + { IPv4(193,135,106,0),24 }, + { IPv4(193,135,254,0),24 }, + { IPv4(193,138,32,0),19 }, + { IPv4(193,140,192,0),20 }, + { IPv4(193,140,208,0),21 }, + { IPv4(193,141,64,0),24 }, + { IPv4(193,141,176,0),24 }, + { IPv4(193,141,182,0),24 }, + { IPv4(193,141,183,0),24 }, + { IPv4(193,141,188,0),24 }, + { IPv4(193,148,24,0),21 }, + { IPv4(193,148,32,0),22 }, + { IPv4(193,148,36,0),23 }, + { IPv4(193,148,246,0),24 }, + { IPv4(193,149,32,0),19 }, + { IPv4(193,149,192,0),18 }, + { IPv4(193,149,217,0),24 }, + { IPv4(193,150,152,0),21 }, + { IPv4(193,150,160,0),22 }, + { IPv4(193,150,164,0),24 }, + { IPv4(193,162,104,0),23 }, + { IPv4(193,164,96,0),19 }, + { IPv4(193,164,192,0),24 }, + { IPv4(193,164,194,0),23 }, + { IPv4(193,164,242,0),23 }, + { IPv4(193,168,2,0),24 }, + { IPv4(193,171,114,0),24 }, + { IPv4(193,172,0,0),15 }, + { IPv4(193,176,64,0),24 }, + { IPv4(193,176,93,0),24 }, + { IPv4(193,176,94,0),23 }, + { IPv4(193,176,136,0),21 }, + { IPv4(193,177,224,0),21 }, + { IPv4(193,178,53,0),24 }, + { IPv4(193,178,131,0),24 }, + { IPv4(193,178,132,0),24 }, + { IPv4(193,178,148,0),23 }, + { IPv4(193,178,173,0),24 }, + { IPv4(193,178,208,0),24 }, + { IPv4(193,178,219,0),24 }, + { IPv4(193,180,62,0),24 }, + { IPv4(193,181,0,0),24 }, + { IPv4(193,183,18,0),23 }, + { IPv4(193,186,93,0),24 }, + { IPv4(193,186,94,0),24 }, + { IPv4(193,186,161,0),24 }, + { IPv4(193,186,188,0),22 }, + { IPv4(193,188,32,0),20 }, + { IPv4(193,188,32,0),24 }, + { IPv4(193,188,34,0),23 }, + { IPv4(193,188,36,0),23 }, + { IPv4(193,188,40,0),21 }, + { IPv4(193,188,135,0),24 }, + { IPv4(193,188,160,0),19 }, + { IPv4(193,192,32,0),19 }, + { 
IPv4(193,192,64,0),19 }, + { IPv4(193,192,224,0),19 }, + { IPv4(193,192,230,0),24 }, + { IPv4(193,192,246,0),24 }, + { IPv4(193,192,249,0),24 }, + { IPv4(193,193,97,0),24 }, + { IPv4(193,193,99,0),24 }, + { IPv4(193,193,104,0),24 }, + { IPv4(193,193,106,0),24 }, + { IPv4(193,193,108,0),24 }, + { IPv4(193,193,112,0),24 }, + { IPv4(193,193,113,0),24 }, + { IPv4(193,193,121,0),24 }, + { IPv4(193,193,122,0),24 }, + { IPv4(193,193,123,0),24 }, + { IPv4(193,193,124,0),22 }, + { IPv4(193,193,161,0),24 }, + { IPv4(193,193,171,0),24 }, + { IPv4(193,193,184,0),23 }, + { IPv4(193,194,64,0),19 }, + { IPv4(193,194,64,0),24 }, + { IPv4(193,194,68,0),24 }, + { IPv4(193,194,76,0),24 }, + { IPv4(193,194,130,0),24 }, + { IPv4(193,194,136,0),24 }, + { IPv4(193,194,158,0),24 }, + { IPv4(193,195,0,0),16 }, + { IPv4(193,195,63,0),24 }, + { IPv4(193,195,234,0),24 }, + { IPv4(193,203,30,0),23 }, + { IPv4(193,203,96,0),19 }, + { IPv4(193,203,225,0),24 }, + { IPv4(193,203,226,0),24 }, + { IPv4(193,203,240,0),20 }, + { IPv4(193,218,80,0),23 }, + { IPv4(193,218,84,0),22 }, + { IPv4(193,218,88,0),22 }, + { IPv4(193,218,92,0),23 }, + { IPv4(193,218,99,0),24 }, + { IPv4(193,218,104,0),24 }, + { IPv4(193,218,121,0),24 }, + { IPv4(193,222,60,0),24 }, + { IPv4(193,226,30,0),24 }, + { IPv4(193,226,31,0),24 }, + { IPv4(193,226,32,0),24 }, + { IPv4(193,226,33,0),24 }, + { IPv4(193,226,35,0),24 }, + { IPv4(193,226,44,0),24 }, + { IPv4(193,226,54,0),24 }, + { IPv4(193,226,57,0),24 }, + { IPv4(193,226,64,0),24 }, + { IPv4(193,226,82,0),23 }, + { IPv4(193,226,83,0),24 }, + { IPv4(193,226,84,0),24 }, + { IPv4(193,226,88,0),23 }, + { IPv4(193,226,95,0),24 }, + { IPv4(193,226,98,0),24 }, + { IPv4(193,226,99,0),24 }, + { IPv4(193,226,100,0),24 }, + { IPv4(193,226,101,0),24 }, + { IPv4(193,226,103,0),24 }, + { IPv4(193,226,111,0),24 }, + { IPv4(193,227,97,0),24 }, + { IPv4(193,227,105,0),24 }, + { IPv4(193,227,106,0),24 }, + { IPv4(193,227,107,0),24 }, + { IPv4(193,228,61,0),24 }, + { IPv4(193,228,62,0),24 }, + { IPv4(193,230,0,0),17 }, + { IPv4(193,230,134,0),23 }, + { IPv4(193,230,135,0),24 }, + { IPv4(193,230,142,0),24 }, + { IPv4(193,230,145,0),24 }, + { IPv4(193,230,146,0),24 }, + { IPv4(193,230,160,0),24 }, + { IPv4(193,230,163,0),24 }, + { IPv4(193,230,166,0),24 }, + { IPv4(193,230,167,0),24 }, + { IPv4(193,230,169,0),24 }, + { IPv4(193,230,213,0),24 }, + { IPv4(193,230,232,0),24 }, + { IPv4(193,230,234,0),23 }, + { IPv4(193,230,237,0),24 }, + { IPv4(193,230,239,0),24 }, + { IPv4(193,230,242,0),24 }, + { IPv4(193,230,243,0),24 }, + { IPv4(193,230,245,0),24 }, + { IPv4(193,230,248,0),24 }, + { IPv4(193,230,253,0),24 }, + { IPv4(193,231,79,0),24 }, + { IPv4(193,231,98,0),24 }, + { IPv4(193,231,99,0),24 }, + { IPv4(193,231,109,0),24 }, + { IPv4(193,231,116,0),22 }, + { IPv4(193,231,119,0),24 }, + { IPv4(193,231,120,0),21 }, + { IPv4(193,231,122,0),24 }, + { IPv4(193,231,123,0),24 }, + { IPv4(193,231,164,0),22 }, + { IPv4(193,231,180,0),22 }, + { IPv4(193,231,204,0),24 }, + { IPv4(193,231,206,0),23 }, + { IPv4(193,231,246,0),24 }, + { IPv4(193,231,250,0),24 }, + { IPv4(193,234,220,0),23 }, + { IPv4(193,235,130,0),23 }, + { IPv4(193,235,206,0),24 }, + { IPv4(193,237,0,0),16 }, + { IPv4(193,238,0,0),16 }, + { IPv4(193,242,96,0),24 }, + { IPv4(193,242,113,0),24 }, + { IPv4(193,242,115,0),24 }, + { IPv4(193,242,116,0),24 }, + { IPv4(193,243,162,0),23 }, + { IPv4(193,243,164,0),23 }, + { IPv4(193,243,176,0),22 }, + { IPv4(193,243,180,0),23 }, + { IPv4(193,243,192,0),19 }, + { IPv4(193,243,224,0),19 }, + { IPv4(193,246,96,0),24 }, + { 
IPv4(193,246,101,0),24 }, + { IPv4(193,246,108,0),23 }, + { IPv4(193,246,120,0),24 }, + { IPv4(193,246,123,0),24 }, + { IPv4(193,247,48,0),23 }, + { IPv4(193,247,51,0),24 }, + { IPv4(193,247,54,0),23 }, + { IPv4(193,247,56,0),22 }, + { IPv4(193,247,68,0),22 }, + { IPv4(193,247,74,0),23 }, + { IPv4(193,247,76,0),24 }, + { IPv4(193,247,81,0),24 }, + { IPv4(193,247,87,0),24 }, + { IPv4(193,247,88,0),24 }, + { IPv4(193,247,94,0),24 }, + { IPv4(193,247,101,0),24 }, + { IPv4(193,247,133,0),24 }, + { IPv4(193,247,134,0),23 }, + { IPv4(193,247,147,0),24 }, + { IPv4(193,247,180,0),24 }, + { IPv4(193,247,183,0),24 }, + { IPv4(193,247,189,0),24 }, + { IPv4(193,247,202,0),24 }, + { IPv4(193,247,218,0),24 }, + { IPv4(193,247,219,0),24 }, + { IPv4(193,247,220,0),22 }, + { IPv4(193,247,238,0),24 }, + { IPv4(193,254,28,0),24 }, + { IPv4(193,255,106,0),24 }, + { IPv4(194,8,64,0),19 }, + { IPv4(194,8,96,0),19 }, + { IPv4(194,8,128,0),19 }, + { IPv4(194,8,228,0),22 }, + { IPv4(194,8,231,0),24 }, + { IPv4(194,8,232,0),24 }, + { IPv4(194,8,233,0),24 }, + { IPv4(194,8,234,0),24 }, + { IPv4(194,8,235,0),24 }, + { IPv4(194,8,236,0),24 }, + { IPv4(194,9,124,0),23 }, + { IPv4(194,9,126,0),24 }, + { IPv4(194,10,201,0),24 }, + { IPv4(194,13,240,0),20 }, + { IPv4(194,14,6,0),23 }, + { IPv4(194,14,80,0),24 }, + { IPv4(194,14,81,0),24 }, + { IPv4(194,14,86,0),24 }, + { IPv4(194,15,64,0),21 }, + { IPv4(194,15,72,0),22 }, + { IPv4(194,15,175,0),24 }, + { IPv4(194,15,230,0),24 }, + { IPv4(194,15,237,0),24 }, + { IPv4(194,15,243,0),24 }, + { IPv4(194,20,8,0),21 }, + { IPv4(194,20,40,0),23 }, + { IPv4(194,20,42,0),24 }, + { IPv4(194,20,44,0),22 }, + { IPv4(194,20,49,0),24 }, + { IPv4(194,20,50,0),24 }, + { IPv4(194,20,52,0),22 }, + { IPv4(194,20,56,0),23 }, + { IPv4(194,20,60,0),22 }, + { IPv4(194,20,108,0),22 }, + { IPv4(194,20,155,0),24 }, + { IPv4(194,20,199,0),24 }, + { IPv4(194,20,200,0),21 }, + { IPv4(194,20,208,0),21 }, + { IPv4(194,20,216,0),22 }, + { IPv4(194,20,222,0),24 }, + { IPv4(194,20,226,0),24 }, + { IPv4(194,20,229,0),24 }, + { IPv4(194,20,248,0),24 }, + { IPv4(194,21,4,0),22 }, + { IPv4(194,21,8,0),22 }, + { IPv4(194,21,19,0),24 }, + { IPv4(194,21,20,0),22 }, + { IPv4(194,21,25,0),24 }, + { IPv4(194,21,28,0),22 }, + { IPv4(194,29,0,0),19 }, + { IPv4(194,29,64,0),24 }, + { IPv4(194,29,65,0),24 }, + { IPv4(194,29,68,0),23 }, + { IPv4(194,29,71,0),24 }, + { IPv4(194,29,72,0),21 }, + { IPv4(194,29,97,0),24 }, + { IPv4(194,29,98,0),24 }, + { IPv4(194,29,99,0),24 }, + { IPv4(194,29,100,0),23 }, + { IPv4(194,29,102,0),23 }, + { IPv4(194,29,216,0),21 }, + { IPv4(194,30,128,0),19 }, + { IPv4(194,30,192,0),18 }, + { IPv4(194,31,16,0),20 }, + { IPv4(194,31,77,0),24 }, + { IPv4(194,31,205,0),24 }, + { IPv4(194,31,220,0),24 }, + { IPv4(194,31,227,0),24 }, + { IPv4(194,31,240,0),23 }, + { IPv4(194,31,242,0),24 }, + { IPv4(194,32,125,0),24 }, + { IPv4(194,32,126,0),23 }, + { IPv4(194,32,174,0),23 }, + { IPv4(194,32,203,0),24 }, + { IPv4(194,32,221,0),24 }, + { IPv4(194,33,47,0),24 }, + { IPv4(194,33,48,0),23 }, + { IPv4(194,34,112,0),20 }, + { IPv4(194,35,15,0),24 }, + { IPv4(194,35,252,0),24 }, + { IPv4(194,36,120,0),24 }, + { IPv4(194,36,172,0),22 }, + { IPv4(194,36,208,0),24 }, + { IPv4(194,36,219,0),24 }, + { IPv4(194,36,220,0),24 }, + { IPv4(194,36,223,0),24 }, + { IPv4(194,38,74,0),23 }, + { IPv4(194,38,76,0),22 }, + { IPv4(194,38,80,0),21 }, + { IPv4(194,38,88,0),23 }, + { IPv4(194,38,90,0),24 }, + { IPv4(194,39,121,0),24 }, + { IPv4(194,39,148,0),24 }, + { IPv4(194,39,237,0),24 }, + { IPv4(194,40,0,0),17 }, + { 
IPv4(194,41,1,0),24 }, + { IPv4(194,41,60,0),23 }, + { IPv4(194,41,62,0),24 }, + { IPv4(194,42,56,0),21 }, + { IPv4(194,42,128,0),19 }, + { IPv4(194,42,160,0),19 }, + { IPv4(194,42,176,0),20 }, + { IPv4(194,42,192,0),20 }, + { IPv4(194,44,16,0),24 }, + { IPv4(194,44,26,0),24 }, + { IPv4(194,44,110,0),24 }, + { IPv4(194,44,154,0),24 }, + { IPv4(194,44,186,0),24 }, + { IPv4(194,45,45,0),24 }, + { IPv4(194,45,106,0),24 }, + { IPv4(194,45,127,0),24 }, + { IPv4(194,45,184,0),24 }, + { IPv4(194,45,227,0),24 }, + { IPv4(194,45,232,0),24 }, + { IPv4(194,48,88,0),22 }, + { IPv4(194,48,124,0),22 }, + { IPv4(194,48,128,0),22 }, + { IPv4(194,48,132,0),24 }, + { IPv4(194,48,132,0),22 }, + { IPv4(194,48,136,0),22 }, + { IPv4(194,48,204,0),22 }, + { IPv4(194,48,208,0),21 }, + { IPv4(194,48,216,0),22 }, + { IPv4(194,49,21,0),24 }, + { IPv4(194,49,22,0),24 }, + { IPv4(194,49,60,0),24 }, + { IPv4(194,53,57,0),24 }, + { IPv4(194,53,92,0),24 }, + { IPv4(194,55,84,0),23 }, + { IPv4(194,55,86,0),24 }, + { IPv4(194,55,183,0),24 }, + { IPv4(194,55,246,0),23 }, + { IPv4(194,56,97,0),24 }, + { IPv4(194,56,165,0),24 }, + { IPv4(194,56,244,0),24 }, + { IPv4(194,56,250,0),23 }, + { IPv4(194,59,16,0),23 }, + { IPv4(194,59,96,0),19 }, + { IPv4(194,59,133,0),24 }, + { IPv4(194,59,152,0),23 }, + { IPv4(194,59,154,0),24 }, + { IPv4(194,59,156,0),24 }, + { IPv4(194,59,182,0),24 }, + { IPv4(194,60,98,0),24 }, + { IPv4(194,60,99,0),24 }, + { IPv4(194,60,100,0),24 }, + { IPv4(194,60,101,0),24 }, + { IPv4(194,60,102,0),24 }, + { IPv4(194,60,103,0),24 }, + { IPv4(194,60,104,0),24 }, + { IPv4(194,60,105,0),24 }, + { IPv4(194,60,106,0),23 }, + { IPv4(194,60,108,0),22 }, + { IPv4(194,61,41,0),24 }, + { IPv4(194,61,49,0),24 }, + { IPv4(194,61,63,0),24 }, + { IPv4(194,61,173,0),24 }, + { IPv4(194,61,230,0),24 }, + { IPv4(194,62,124,0),24 }, + { IPv4(194,63,0,0),17 }, + { IPv4(194,64,125,0),24 }, + { IPv4(194,64,151,0),24 }, + { IPv4(194,68,8,0),22 }, + { IPv4(194,68,12,0),24 }, + { IPv4(194,68,56,0),24 }, + { IPv4(194,68,102,0),24 }, + { IPv4(194,68,222,0),24 }, + { IPv4(194,69,16,0),20 }, + { IPv4(194,69,32,0),19 }, + { IPv4(194,69,169,0),24 }, + { IPv4(194,69,181,0),24 }, + { IPv4(194,69,182,0),24 }, + { IPv4(194,69,253,0),24 }, + { IPv4(194,70,0,0),16 }, + { IPv4(194,71,222,0),24 }, + { IPv4(194,72,0,0),14 }, + { IPv4(194,72,154,0),24 }, + { IPv4(194,73,16,0),21 }, + { IPv4(194,73,24,0),21 }, + { IPv4(194,73,74,0),24 }, + { IPv4(194,73,84,0),24 }, + { IPv4(194,73,85,0),24 }, + { IPv4(194,73,86,0),24 }, + { IPv4(194,73,94,0),23 }, + { IPv4(194,73,144,0),24 }, + { IPv4(194,73,228,0),23 }, + { IPv4(194,74,80,0),21 }, + { IPv4(194,74,88,0),21 }, + { IPv4(194,74,96,0),21 }, + { IPv4(194,74,104,0),21 }, + { IPv4(194,74,108,0),24 }, + { IPv4(194,74,111,0),24 }, + { IPv4(194,74,112,0),21 }, + { IPv4(194,74,120,0),21 }, + { IPv4(194,74,128,0),21 }, + { IPv4(194,74,136,0),21 }, + { IPv4(194,74,152,0),21 }, + { IPv4(194,74,160,0),19 }, + { IPv4(194,75,0,0),20 }, + { IPv4(194,75,16,0),21 }, + { IPv4(194,75,24,0),21 }, + { IPv4(194,75,26,0),24 }, + { IPv4(194,75,27,0),24 }, + { IPv4(194,75,40,0),21 }, + { IPv4(194,75,44,0),23 }, + { IPv4(194,75,48,0),21 }, + { IPv4(194,75,64,0),20 }, + { IPv4(194,75,80,0),21 }, + { IPv4(194,75,88,0),21 }, + { IPv4(194,75,112,0),21 }, + { IPv4(194,75,120,0),21 }, + { IPv4(194,75,178,0),24 }, + { IPv4(194,75,192,0),21 }, + { IPv4(194,75,200,0),21 }, + { IPv4(194,75,208,0),21 }, + { IPv4(194,75,216,0),21 }, + { IPv4(194,75,220,0),23 }, + { IPv4(194,76,40,0),24 }, + { IPv4(194,76,45,0),24 }, + { 
IPv4(194,76,146,0),24 }, + { IPv4(194,76,240,0),24 }, + { IPv4(194,77,0,0),16 }, + { IPv4(194,77,20,0),24 }, + { IPv4(194,77,21,0),24 }, + { IPv4(194,77,24,0),23 }, + { IPv4(194,77,26,0),23 }, + { IPv4(194,77,28,0),24 }, + { IPv4(194,77,71,0),24 }, + { IPv4(194,77,76,0),24 }, + { IPv4(194,77,82,0),24 }, + { IPv4(194,77,90,0),24 }, + { IPv4(194,77,97,0),24 }, + { IPv4(194,77,139,0),24 }, + { IPv4(194,77,153,0),24 }, + { IPv4(194,77,161,0),24 }, + { IPv4(194,77,213,0),24 }, + { IPv4(194,77,253,0),24 }, + { IPv4(194,85,23,0),24 }, + { IPv4(194,85,48,0),21 }, + { IPv4(194,85,56,0),24 }, + { IPv4(194,85,57,0),24 }, + { IPv4(194,88,58,0),24 }, + { IPv4(194,88,128,0),19 }, + { IPv4(194,93,50,0),24 }, + { IPv4(194,93,192,0),18 }, + { IPv4(194,96,0,0),16 }, + { IPv4(194,97,0,0),16 }, + { IPv4(194,97,120,0),21 }, + { IPv4(194,97,128,0),19 }, + { IPv4(194,97,168,0),24 }, + { IPv4(194,99,111,0),24 }, + { IPv4(194,99,115,0),24 }, + { IPv4(194,101,64,0),21 }, + { IPv4(194,101,72,0),22 }, + { IPv4(194,101,76,0),23 }, + { IPv4(194,101,78,0),24 }, + { IPv4(194,102,0,0),19 }, + { IPv4(194,102,16,0),24 }, + { IPv4(194,102,79,0),24 }, + { IPv4(194,102,99,0),24 }, + { IPv4(194,102,114,0),24 }, + { IPv4(194,102,120,0),22 }, + { IPv4(194,102,127,0),24 }, + { IPv4(194,102,131,0),24 }, + { IPv4(194,102,144,0),22 }, + { IPv4(194,102,148,0),24 }, + { IPv4(194,102,170,0),24 }, + { IPv4(194,102,172,0),24 }, + { IPv4(194,102,173,0),24 }, + { IPv4(194,102,174,0),24 }, + { IPv4(194,102,181,0),24 }, + { IPv4(194,102,192,0),24 }, + { IPv4(194,102,224,0),24 }, + { IPv4(194,102,232,0),24 }, + { IPv4(194,102,233,0),24 }, + { IPv4(194,103,23,0),24 }, + { IPv4(194,103,152,0),24 }, + { IPv4(194,104,100,0),24 }, + { IPv4(194,104,120,0),22 }, + { IPv4(194,104,124,0),23 }, + { IPv4(194,104,138,0),23 }, + { IPv4(194,104,140,0),24 }, + { IPv4(194,104,142,0),24 }, + { IPv4(194,104,175,0),24 }, + { IPv4(194,105,8,0),24 }, + { IPv4(194,105,12,0),22 }, + { IPv4(194,105,16,0),24 }, + { IPv4(194,105,20,0),24 }, + { IPv4(194,105,64,0),19 }, + { IPv4(194,105,160,0),19 }, + { IPv4(194,106,188,0),22 }, + { IPv4(194,107,60,0),22 }, + { IPv4(194,107,64,0),22 }, + { IPv4(194,107,68,0),24 }, + { IPv4(194,107,82,0),24 }, + { IPv4(194,107,83,0),24 }, + { IPv4(194,107,96,0),20 }, + { IPv4(194,107,112,0),24 }, + { IPv4(194,107,114,0),23 }, + { IPv4(194,107,116,0),22 }, + { IPv4(194,107,120,0),21 }, + { IPv4(194,112,128,0),18 }, + { IPv4(194,112,192,0),18 }, + { IPv4(194,115,182,0),23 }, + { IPv4(194,115,224,0),20 }, + { IPv4(194,117,128,0),19 }, + { IPv4(194,118,0,0),16 }, + { IPv4(194,119,128,0),18 }, + { IPv4(194,119,224,0),19 }, + { IPv4(194,120,55,0),24 }, + { IPv4(194,120,228,0),24 }, + { IPv4(194,121,56,0),24 }, + { IPv4(194,124,112,0),22 }, + { IPv4(194,124,145,0),24 }, + { IPv4(194,124,146,0),23 }, + { IPv4(194,124,148,0),22 }, + { IPv4(194,125,228,0),24 }, + { IPv4(194,125,229,0),24 }, + { IPv4(194,125,230,0),24 }, + { IPv4(194,125,231,0),24 }, + { IPv4(194,125,252,0),23 }, + { IPv4(194,126,6,0),24 }, + { IPv4(194,126,10,0),24 }, + { IPv4(194,126,11,0),24 }, + { IPv4(194,126,12,0),24 }, + { IPv4(194,126,16,0),24 }, + { IPv4(194,126,17,0),24 }, + { IPv4(194,126,18,0),24 }, + { IPv4(194,126,20,0),24 }, + { IPv4(194,126,23,0),24 }, + { IPv4(194,126,24,0),24 }, + { IPv4(194,126,27,0),24 }, + { IPv4(194,126,46,0),24 }, + { IPv4(194,126,47,0),24 }, + { IPv4(194,126,64,0),19 }, + { IPv4(194,126,128,0),24 }, + { IPv4(194,126,136,0),24 }, + { IPv4(194,126,140,0),24 }, + { IPv4(194,126,142,0),24 }, + { IPv4(194,127,171,0),24 }, + { 
IPv4(194,130,152,0),21 }, + { IPv4(194,132,122,0),24 }, + { IPv4(194,132,149,0),24 }, + { IPv4(194,133,98,0),24 }, + { IPv4(194,133,160,0),20 }, + { IPv4(194,133,240,0),20 }, + { IPv4(194,133,242,0),24 }, + { IPv4(194,133,243,0),24 }, + { IPv4(194,133,244,0),24 }, + { IPv4(194,139,6,0),23 }, + { IPv4(194,139,128,0),18 }, + { IPv4(194,140,64,0),19 }, + { IPv4(194,140,80,0),24 }, + { IPv4(194,140,82,0),24 }, + { IPv4(194,140,224,0),19 }, + { IPv4(194,143,160,0),19 }, + { IPv4(194,145,122,0),23 }, + { IPv4(194,145,124,0),24 }, + { IPv4(194,145,147,0),24 }, + { IPv4(194,145,150,0),23 }, + { IPv4(194,147,112,0),23 }, + { IPv4(194,147,171,0),24 }, + { IPv4(194,147,234,0),23 }, + { IPv4(194,147,236,0),23 }, + { IPv4(194,149,24,0),23 }, + { IPv4(194,149,72,0),22 }, + { IPv4(194,149,91,0),24 }, + { IPv4(194,149,236,0),24 }, + { IPv4(194,149,243,0),24 }, + { IPv4(194,149,246,0),24 }, + { IPv4(194,149,247,0),24 }, + { IPv4(194,149,248,0),24 }, + { IPv4(194,149,249,0),24 }, + { IPv4(194,149,250,0),24 }, + { IPv4(194,149,251,0),24 }, + { IPv4(194,149,252,0),24 }, + { IPv4(194,149,253,0),24 }, + { IPv4(194,149,254,0),24 }, + { IPv4(194,149,255,0),24 }, + { IPv4(194,151,0,0),16 }, + { IPv4(194,151,2,0),23 }, + { IPv4(194,151,128,0),19 }, + { IPv4(194,152,128,0),19 }, + { IPv4(194,153,83,0),24 }, + { IPv4(194,153,86,0),23 }, + { IPv4(194,153,99,0),24 }, + { IPv4(194,153,132,0),24 }, + { IPv4(194,153,136,0),21 }, + { IPv4(194,153,144,0),24 }, + { IPv4(194,153,150,0),24 }, + { IPv4(194,153,160,0),21 }, + { IPv4(194,153,176,0),21 }, + { IPv4(194,153,227,0),24 }, + { IPv4(194,153,229,0),24 }, + { IPv4(194,153,231,0),24 }, + { IPv4(194,153,236,0),24 }, + { IPv4(194,153,241,0),24 }, + { IPv4(194,153,250,0),24 }, + { IPv4(194,153,253,0),24 }, + { IPv4(194,153,255,0),24 }, + { IPv4(194,154,0,0),19 }, + { IPv4(194,154,128,0),24 }, + { IPv4(194,154,129,0),24 }, + { IPv4(194,154,131,0),24 }, + { IPv4(194,154,132,0),24 }, + { IPv4(194,154,133,0),24 }, + { IPv4(194,154,134,0),24 }, + { IPv4(194,154,135,0),24 }, + { IPv4(194,154,136,0),22 }, + { IPv4(194,154,140,0),22 }, + { IPv4(194,154,144,0),22 }, + { IPv4(194,154,149,0),24 }, + { IPv4(194,154,152,0),24 }, + { IPv4(194,154,153,0),24 }, + { IPv4(194,154,154,0),24 }, + { IPv4(194,154,156,0),24 }, + { IPv4(194,154,157,0),24 }, + { IPv4(194,154,158,0),24 }, + { IPv4(194,154,159,0),24 }, + { IPv4(194,154,160,0),19 }, + { IPv4(194,154,192,0),19 }, + { IPv4(194,158,128,0),19 }, + { IPv4(194,158,160,0),19 }, + { IPv4(194,158,224,0),19 }, + { IPv4(194,158,250,0),23 }, + { IPv4(194,158,252,0),24 }, + { IPv4(194,159,0,0),16 }, + { IPv4(194,159,72,0),23 }, + { IPv4(194,159,224,0),21 }, + { IPv4(194,161,154,0),24 }, + { IPv4(194,161,200,0),23 }, + { IPv4(194,161,200,0),24 }, + { IPv4(194,161,201,0),24 }, + { IPv4(194,164,7,0),24 }, + { IPv4(194,165,64,0),19 }, + { IPv4(194,165,209,0),24 }, + { IPv4(194,168,0,0),16 }, + { IPv4(194,171,96,0),21 }, + { IPv4(194,174,84,0),23 }, + { IPv4(194,176,179,0),24 }, + { IPv4(194,177,96,0),19 }, + { IPv4(194,177,128,0),19 }, + { IPv4(194,180,25,0),24 }, + { IPv4(194,180,128,0),24 }, + { IPv4(194,180,160,0),21 }, + { IPv4(194,180,239,0),24 }, + { IPv4(194,183,128,0),19 }, + { IPv4(194,183,192,0),19 }, + { IPv4(194,183,211,0),24 }, + { IPv4(194,183,218,0),24 }, + { IPv4(194,183,224,0),19 }, + { IPv4(194,185,0,0),16 }, + { IPv4(194,185,25,0),24 }, + { IPv4(194,193,17,0),24 }, + { IPv4(194,194,0,0),16 }, + { IPv4(194,196,0,0),16 }, + { IPv4(194,196,47,0),24 }, + { IPv4(194,196,196,0),24 }, + { IPv4(194,196,248,0),24 }, + { IPv4(194,201,253,0),24 
}, + { IPv4(194,202,0,0),22 }, + { IPv4(194,202,4,0),23 }, + { IPv4(194,203,201,0),24 }, + { IPv4(194,208,0,0),16 }, + { IPv4(194,209,108,0),24 }, + { IPv4(194,209,146,0),24 }, + { IPv4(194,209,185,0),24 }, + { IPv4(194,213,64,0),19 }, + { IPv4(194,216,59,0),24 }, + { IPv4(194,216,168,0),24 }, + { IPv4(194,217,0,0),16 }, + { IPv4(194,217,92,0),24 }, + { IPv4(194,217,220,0),23 }, + { IPv4(194,222,0,0),16 }, + { IPv4(194,230,0,0),16 }, + { IPv4(194,230,57,0),24 }, + { IPv4(194,230,99,0),24 }, + { IPv4(194,231,0,0),16 }, + { IPv4(194,231,54,0),24 }, + { IPv4(194,231,105,0),24 }, + { IPv4(194,231,164,0),23 }, + { IPv4(194,231,168,0),24 }, + { IPv4(194,231,236,0),22 }, + { IPv4(194,231,242,0),23 }, + { IPv4(194,231,246,0),24 }, + { IPv4(194,231,254,0),23 }, + { IPv4(194,232,0,0),16 }, + { IPv4(194,235,143,0),24 }, + { IPv4(194,235,243,0),24 }, + { IPv4(194,238,0,0),16 }, + { IPv4(194,242,34,0),24 }, + { IPv4(194,242,35,0),24 }, + { IPv4(194,242,41,0),24 }, + { IPv4(194,242,45,0),24 }, + { IPv4(194,242,54,0),24 }, + { IPv4(194,242,58,0),24 }, + { IPv4(194,242,61,0),24 }, + { IPv4(194,242,64,0),19 }, + { IPv4(194,242,160,0),24 }, + { IPv4(194,246,96,0),24 }, + { IPv4(194,247,64,0),19 }, + { IPv4(194,247,74,0),24 }, + { IPv4(194,247,75,0),24 }, + { IPv4(194,247,91,0),24 }, + { IPv4(194,253,130,0),24 }, + { IPv4(194,253,184,0),24 }, + { IPv4(195,2,128,0),19 }, + { IPv4(195,2,160,0),19 }, + { IPv4(195,3,108,0),23 }, + { IPv4(195,4,67,0),24 }, + { IPv4(195,4,68,0),23 }, + { IPv4(195,5,0,0),19 }, + { IPv4(195,5,32,0),19 }, + { IPv4(195,5,64,0),19 }, + { IPv4(195,5,197,0),24 }, + { IPv4(195,5,204,0),24 }, + { IPv4(195,7,224,0),19 }, + { IPv4(195,8,64,0),19 }, + { IPv4(195,10,96,0),20 }, + { IPv4(195,10,112,0),20 }, + { IPv4(195,10,224,0),19 }, + { IPv4(195,11,0,0),16 }, + { IPv4(195,11,224,0),19 }, + { IPv4(195,12,0,0),19 }, + { IPv4(195,12,192,0),19 }, + { IPv4(195,13,40,0),22 }, + { IPv4(195,13,64,0),18 }, + { IPv4(195,14,64,0),19 }, + { IPv4(195,16,0,0),19 }, + { IPv4(195,16,64,0),19 }, + { IPv4(195,16,128,0),19 }, + { IPv4(195,16,224,0),19 }, + { IPv4(195,18,64,0),18 }, + { IPv4(195,22,0,0),19 }, + { IPv4(195,24,64,0),19 }, + { IPv4(195,24,192,0),19 }, + { IPv4(195,26,64,0),19 }, + { IPv4(195,26,96,0),19 }, + { IPv4(195,26,192,0),19 }, + { IPv4(195,28,224,0),19 }, + { IPv4(195,30,0,0),16 }, + { IPv4(195,33,0,0),16 }, + { IPv4(195,33,18,0),24 }, + { IPv4(195,33,64,0),24 }, + { IPv4(195,33,96,0),19 }, + { IPv4(195,33,192,0),18 }, + { IPv4(195,34,160,0),19 }, + { IPv4(195,35,81,0),24 }, + { IPv4(195,35,105,0),24 }, + { IPv4(195,35,106,0),24 }, + { IPv4(195,35,110,0),24 }, + { IPv4(195,35,121,0),24 }, + { IPv4(195,35,126,0),24 }, + { IPv4(195,35,128,0),18 }, + { IPv4(195,38,19,0),24 }, + { IPv4(195,38,64,0),19 }, + { IPv4(195,38,192,0),18 }, + { IPv4(195,42,240,0),21 }, + { IPv4(195,46,0,0),21 }, + { IPv4(195,46,8,0),21 }, + { IPv4(195,46,16,0),20 }, + { IPv4(195,46,128,0),21 }, + { IPv4(195,46,128,0),24 }, + { IPv4(195,46,128,0),19 }, + { IPv4(195,46,132,0),24 }, + { IPv4(195,46,134,0),24 }, + { IPv4(195,46,141,0),24 }, + { IPv4(195,46,142,0),24 }, + { IPv4(195,46,224,0),19 }, + { IPv4(195,51,64,0),24 }, + { IPv4(195,51,174,0),23 }, + { IPv4(195,51,176,0),23 }, + { IPv4(195,51,180,0),24 }, + { IPv4(195,54,96,0),19 }, + { IPv4(195,54,224,0),19 }, + { IPv4(195,58,128,0),19 }, + { IPv4(195,58,192,0),19 }, + { IPv4(195,60,0,0),19 }, + { IPv4(195,61,32,0),24 }, + { IPv4(195,61,61,0),24 }, + { IPv4(195,64,0,0),19 }, + { IPv4(195,65,24,0),24 }, + { IPv4(195,65,76,0),24 }, + { IPv4(195,65,77,0),24 }, + { 
IPv4(195,65,78,0),24 }, + { IPv4(195,65,158,0),24 }, + { IPv4(195,66,128,0),19 }, + { IPv4(195,66,224,0),19 }, + { IPv4(195,70,64,0),19 }, + { IPv4(195,70,96,0),19 }, + { IPv4(195,72,128,0),19 }, + { IPv4(195,72,160,0),19 }, + { IPv4(195,74,96,0),19 }, + { IPv4(195,74,128,0),19 }, + { IPv4(195,74,224,0),19 }, + { IPv4(195,75,0,0),16 }, + { IPv4(195,75,46,0),24 }, + { IPv4(195,78,128,0),19 }, + { IPv4(195,79,161,0),24 }, + { IPv4(195,79,171,0),24 }, + { IPv4(195,80,0,0),19 }, + { IPv4(195,80,32,0),19 }, + { IPv4(195,80,129,0),24 }, + { IPv4(195,80,134,0),24 }, + { IPv4(195,81,40,0),23 }, + { IPv4(195,82,32,0),19 }, + { IPv4(195,82,128,0),19 }, + { IPv4(195,82,224,0),19 }, + { IPv4(195,85,128,0),18 }, + { IPv4(195,86,0,0),16 }, + { IPv4(195,87,0,0),16 }, + { IPv4(195,88,10,0),23 }, + { IPv4(195,88,17,0),24 }, + { IPv4(195,88,42,0),24 }, + { IPv4(195,88,43,0),24 }, + { IPv4(195,88,44,0),23 }, + { IPv4(195,88,46,0),24 }, + { IPv4(195,88,120,0),24 }, + { IPv4(195,88,123,0),24 }, + { IPv4(195,88,146,0),24 }, + { IPv4(195,88,160,0),22 }, + { IPv4(195,90,64,0),19 }, + { IPv4(195,90,192,0),19 }, + { IPv4(195,90,224,0),19 }, + { IPv4(195,93,0,0),17 }, + { IPv4(195,93,16,0),20 }, + { IPv4(195,93,32,0),20 }, + { IPv4(195,93,48,0),20 }, + { IPv4(195,93,64,0),20 }, + { IPv4(195,93,80,0),20 }, + { IPv4(195,94,0,0),24 }, + { IPv4(195,94,1,0),24 }, + { IPv4(195,94,2,0),24 }, + { IPv4(195,94,3,0),24 }, + { IPv4(195,94,4,0),24 }, + { IPv4(195,94,5,0),24 }, + { IPv4(195,94,6,0),24 }, + { IPv4(195,94,96,0),19 }, + { IPv4(195,95,0,0),18 }, + { IPv4(195,95,64,0),19 }, + { IPv4(195,95,96,0),19 }, + { IPv4(195,95,128,0),21 }, + { IPv4(195,95,152,0),21 }, + { IPv4(195,95,160,0),20 }, + { IPv4(195,96,32,0),19 }, + { IPv4(195,97,0,0),17 }, + { IPv4(195,97,87,0),24 }, + { IPv4(195,98,192,0),19 }, + { IPv4(195,99,0,0),16 }, + { IPv4(195,99,8,0),21 }, + { IPv4(195,99,16,0),21 }, + { IPv4(195,99,24,0),21 }, + { IPv4(195,99,32,0),21 }, + { IPv4(195,99,64,0),20 }, + { IPv4(195,99,69,0),24 }, + { IPv4(195,99,80,0),21 }, + { IPv4(195,99,88,0),21 }, + { IPv4(195,99,93,0),24 }, + { IPv4(195,99,96,0),21 }, + { IPv4(195,99,104,0),21 }, + { IPv4(195,99,112,0),21 }, + { IPv4(195,99,192,0),21 }, + { IPv4(195,99,200,0),21 }, + { IPv4(195,99,208,0),21 }, + { IPv4(195,99,224,0),21 }, + { IPv4(195,99,232,0),21 }, + { IPv4(195,99,248,0),21 }, + { IPv4(195,100,0,0),16 }, + { IPv4(195,102,0,0),16 }, + { IPv4(195,102,253,0),24 }, + { IPv4(195,106,206,0),23 }, + { IPv4(195,110,160,0),19 }, + { IPv4(195,110,192,0),19 }, + { IPv4(195,112,0,0),18 }, + { IPv4(195,112,64,0),19 }, + { IPv4(195,121,0,0),16 }, + { IPv4(195,129,2,0),24 }, + { IPv4(195,129,34,0),23 }, + { IPv4(195,130,160,0),19 }, + { IPv4(195,134,0,0),19 }, + { IPv4(195,134,128,0),19 }, + { IPv4(195,138,124,0),22 }, + { IPv4(195,138,128,0),24 }, + { IPv4(195,138,128,0),21 }, + { IPv4(195,138,131,0),24 }, + { IPv4(195,138,133,0),24 }, + { IPv4(195,138,136,0),23 }, + { IPv4(195,138,137,0),24 }, + { IPv4(195,138,140,0),24 }, + { IPv4(195,138,140,0),22 }, + { IPv4(195,138,141,0),24 }, + { IPv4(195,138,142,0),24 }, + { IPv4(195,138,144,0),24 }, + { IPv4(195,138,144,0),20 }, + { IPv4(195,138,145,0),24 }, + { IPv4(195,138,147,0),24 }, + { IPv4(195,138,148,0),24 }, + { IPv4(195,138,149,0),24 }, + { IPv4(195,138,151,0),24 }, + { IPv4(195,138,152,0),24 }, + { IPv4(195,138,153,0),24 }, + { IPv4(195,138,155,0),24 }, + { IPv4(195,138,156,0),24 }, + { IPv4(195,138,157,0),24 }, + { IPv4(195,138,158,0),24 }, + { IPv4(195,138,159,0),24 }, + { IPv4(195,138,224,0),19 }, + { IPv4(195,141,0,0),16 }, + { 
IPv4(195,141,7,0),24 }, + { IPv4(195,141,36,0),23 }, + { IPv4(195,141,162,0),24 }, + { IPv4(195,141,233,0),24 }, + { IPv4(195,141,250,0),23 }, + { IPv4(195,146,32,0),19 }, + { IPv4(195,147,0,0),16 }, + { IPv4(195,149,0,0),18 }, + { IPv4(195,155,161,0),24 }, + { IPv4(195,162,0,0),19 }, + { IPv4(195,162,64,0),19 }, + { IPv4(195,162,96,0),19 }, + { IPv4(195,162,160,0),19 }, + { IPv4(195,162,224,0),19 }, + { IPv4(195,163,0,0),17 }, + { IPv4(195,163,128,0),19 }, + { IPv4(195,163,160,0),20 }, + { IPv4(195,163,176,0),20 }, + { IPv4(195,163,192,0),18 }, + { IPv4(195,167,0,0),17 }, + { IPv4(195,170,0,0),19 }, + { IPv4(195,170,64,0),19 }, + { IPv4(195,171,0,0),16 }, + { IPv4(195,171,32,0),22 }, + { IPv4(195,171,36,0),22 }, + { IPv4(195,171,44,0),22 }, + { IPv4(195,171,48,0),22 }, + { IPv4(195,171,52,0),22 }, + { IPv4(195,171,56,0),22 }, + { IPv4(195,171,60,0),23 }, + { IPv4(195,171,62,0),23 }, + { IPv4(195,171,64,0),23 }, + { IPv4(195,171,84,0),24 }, + { IPv4(195,171,85,0),24 }, + { IPv4(195,171,86,0),24 }, + { IPv4(195,171,88,0),21 }, + { IPv4(195,171,100,0),22 }, + { IPv4(195,173,0,0),16 }, + { IPv4(195,173,224,0),19 }, + { IPv4(195,176,128,0),22 }, + { IPv4(195,176,139,0),24 }, + { IPv4(195,176,156,0),23 }, + { IPv4(195,182,0,0),19 }, + { IPv4(195,182,96,0),19 }, + { IPv4(195,183,0,0),16 }, + { IPv4(195,184,64,0),19 }, + { IPv4(195,184,128,0),19 }, + { IPv4(195,184,132,0),24 }, + { IPv4(195,184,137,0),24 }, + { IPv4(195,184,140,0),24 }, + { IPv4(195,184,146,0),24 }, + { IPv4(195,184,147,0),24 }, + { IPv4(195,184,158,0),24 }, + { IPv4(195,184,159,0),24 }, + { IPv4(195,184,224,0),19 }, + { IPv4(195,188,0,0),16 }, + { IPv4(195,190,32,0),19 }, + { IPv4(195,190,160,0),19 }, + { IPv4(195,200,64,0),19 }, + { IPv4(195,200,128,0),19 }, + { IPv4(195,202,65,0),24 }, + { IPv4(195,202,68,0),24 }, + { IPv4(195,202,71,0),24 }, + { IPv4(195,202,73,0),24 }, + { IPv4(195,202,74,0),24 }, + { IPv4(195,202,79,0),24 }, + { IPv4(195,202,82,0),24 }, + { IPv4(195,202,83,0),24 }, + { IPv4(195,202,128,0),19 }, + { IPv4(195,202,160,0),19 }, + { IPv4(195,202,192,0),18 }, + { IPv4(195,206,64,0),19 }, + { IPv4(195,206,160,0),19 }, + { IPv4(195,210,0,0),19 }, + { IPv4(195,211,34,0),23 }, + { IPv4(195,211,99,0),24 }, + { IPv4(195,212,0,0),16 }, + { IPv4(195,213,0,0),16 }, + { IPv4(195,213,87,0),24 }, + { IPv4(195,214,128,0),19 }, + { IPv4(195,214,160,0),20 }, + { IPv4(195,214,176,0),21 }, + { IPv4(195,214,184,0),22 }, + { IPv4(195,218,96,0),19 }, + { IPv4(195,224,0,0),16 }, + { IPv4(195,226,0,0),22 }, + { IPv4(195,226,128,0),19 }, + { IPv4(195,230,0,0),22 }, + { IPv4(195,230,4,0),23 }, + { IPv4(195,230,6,0),24 }, + { IPv4(195,230,7,0),24 }, + { IPv4(195,230,8,0),21 }, + { IPv4(195,230,16,0),20 }, + { IPv4(195,240,0,0),16 }, + { IPv4(195,241,0,0),16 }, + { IPv4(195,244,224,0),19 }, + { IPv4(195,246,0,0),19 }, + { IPv4(195,246,96,0),19 }, + { IPv4(195,246,128,0),19 }, + { IPv4(195,246,204,0),23 }, + { IPv4(195,246,206,0),23 }, + { IPv4(195,248,192,0),24 }, + { IPv4(195,248,210,0),24 }, + { IPv4(195,248,220,0),24 }, + { IPv4(195,250,96,0),19 }, + { IPv4(196,1,32,0),24 }, + { IPv4(196,1,103,0),24 }, + { IPv4(196,1,130,0),24 }, + { IPv4(196,1,131,0),24 }, + { IPv4(196,1,132,0),24 }, + { IPv4(196,1,133,0),24 }, + { IPv4(196,2,0,0),24 }, + { IPv4(196,2,1,0),24 }, + { IPv4(196,3,0,0),24 }, + { IPv4(196,3,1,0),24 }, + { IPv4(196,3,2,0),24 }, + { IPv4(196,3,3,0),24 }, + { IPv4(196,3,4,0),24 }, + { IPv4(196,3,5,0),24 }, + { IPv4(196,3,6,0),24 }, + { IPv4(196,3,7,0),24 }, + { IPv4(196,3,47,0),24 }, + { IPv4(196,3,51,0),24 }, + { 
IPv4(196,3,54,0),24 }, + { IPv4(196,3,95,0),24 }, + { IPv4(196,3,113,0),24 }, + { IPv4(196,3,114,0),24 }, + { IPv4(196,3,115,0),24 }, + { IPv4(196,3,124,0),22 }, + { IPv4(196,3,128,0),22 }, + { IPv4(196,3,156,0),23 }, + { IPv4(196,3,159,0),24 }, + { IPv4(196,3,164,0),22 }, + { IPv4(196,3,168,0),21 }, + { IPv4(196,3,176,0),22 }, + { IPv4(196,3,193,0),24 }, + { IPv4(196,3,198,0),24 }, + { IPv4(196,3,199,0),24 }, + { IPv4(196,3,218,0),23 }, + { IPv4(196,3,220,0),24 }, + { IPv4(196,3,221,0),24 }, + { IPv4(196,4,2,0),24 }, + { IPv4(196,4,20,0),22 }, + { IPv4(196,4,24,0),22 }, + { IPv4(196,4,28,0),23 }, + { IPv4(196,4,49,0),24 }, + { IPv4(196,4,51,0),24 }, + { IPv4(196,4,53,0),24 }, + { IPv4(196,4,58,0),24 }, + { IPv4(196,4,61,0),24 }, + { IPv4(196,4,62,0),23 }, + { IPv4(196,4,64,0),22 }, + { IPv4(196,4,70,0),24 }, + { IPv4(196,4,71,0),24 }, + { IPv4(196,4,72,0),22 }, + { IPv4(196,4,76,0),24 }, + { IPv4(196,4,79,0),24 }, + { IPv4(196,4,81,0),24 }, + { IPv4(196,4,83,0),24 }, + { IPv4(196,4,84,0),24 }, + { IPv4(196,4,87,0),24 }, + { IPv4(196,4,88,0),22 }, + { IPv4(196,4,92,0),23 }, + { IPv4(196,4,95,0),24 }, + { IPv4(196,4,96,0),24 }, + { IPv4(196,4,97,0),24 }, + { IPv4(196,4,100,0),22 }, + { IPv4(196,4,104,0),24 }, + { IPv4(196,4,105,0),24 }, + { IPv4(196,4,143,0),24 }, + { IPv4(196,4,144,0),24 }, + { IPv4(196,4,145,0),24 }, + { IPv4(196,4,146,0),24 }, + { IPv4(196,4,162,0),23 }, + { IPv4(196,4,164,0),24 }, + { IPv4(196,4,165,0),24 }, + { IPv4(196,4,168,0),24 }, + { IPv4(196,4,172,0),24 }, + { IPv4(196,4,173,0),24 }, + { IPv4(196,4,174,0),23 }, + { IPv4(196,4,176,0),21 }, + { IPv4(196,4,184,0),22 }, + { IPv4(196,4,188,0),24 }, + { IPv4(196,4,212,0),22 }, + { IPv4(196,4,216,0),21 }, + { IPv4(196,4,224,0),21 }, + { IPv4(196,4,232,0),21 }, + { IPv4(196,4,239,0),24 }, + { IPv4(196,4,240,0),24 }, + { IPv4(196,4,240,0),21 }, + { IPv4(196,4,241,0),24 }, + { IPv4(196,4,242,0),24 }, + { IPv4(196,4,243,0),24 }, + { IPv4(196,4,244,0),24 }, + { IPv4(196,4,245,0),24 }, + { IPv4(196,4,246,0),24 }, + { IPv4(196,4,247,0),24 }, + { IPv4(196,4,248,0),24 }, + { IPv4(196,4,250,0),23 }, + { IPv4(196,5,0,0),16 }, + { IPv4(196,6,1,0),24 }, + { IPv4(196,6,121,0),24 }, + { IPv4(196,6,149,0),24 }, + { IPv4(196,6,150,0),24 }, + { IPv4(196,6,151,0),24 }, + { IPv4(196,6,153,0),24 }, + { IPv4(196,6,175,0),24 }, + { IPv4(196,6,176,0),23 }, + { IPv4(196,6,178,0),24 }, + { IPv4(196,6,183,0),24 }, + { IPv4(196,6,184,0),24 }, + { IPv4(196,6,196,0),24 }, + { IPv4(196,6,198,0),24 }, + { IPv4(196,6,199,0),24 }, + { IPv4(196,6,201,0),24 }, + { IPv4(196,6,208,0),24 }, + { IPv4(196,6,211,0),24 }, + { IPv4(196,6,212,0),23 }, + { IPv4(196,6,212,0),24 }, + { IPv4(196,6,213,0),24 }, + { IPv4(196,6,214,0),24 }, + { IPv4(196,6,220,0),23 }, + { IPv4(196,6,222,0),23 }, + { IPv4(196,6,237,0),24 }, + { IPv4(196,6,241,0),24 }, + { IPv4(196,6,242,0),24 }, + { IPv4(196,6,243,0),24 }, + { IPv4(196,6,251,0),24 }, + { IPv4(196,8,115,0),24 }, + { IPv4(196,9,0,0),16 }, + { IPv4(196,10,51,0),24 }, + { IPv4(196,10,96,0),24 }, + { IPv4(196,10,101,0),24 }, + { IPv4(196,10,104,0),24 }, + { IPv4(196,10,106,0),24 }, + { IPv4(196,10,107,0),24 }, + { IPv4(196,10,108,0),24 }, + { IPv4(196,10,110,0),24 }, + { IPv4(196,10,111,0),24 }, + { IPv4(196,10,112,0),22 }, + { IPv4(196,10,118,0),24 }, + { IPv4(196,10,119,0),24 }, + { IPv4(196,10,121,0),24 }, + { IPv4(196,10,122,0),23 }, + { IPv4(196,10,124,0),22 }, + { IPv4(196,10,130,0),23 }, + { IPv4(196,10,132,0),22 }, + { IPv4(196,10,136,0),22 }, + { IPv4(196,10,140,0),22 }, + { IPv4(196,10,150,0),23 }, + { 
IPv4(196,10,152,0),21 }, + { IPv4(196,10,160,0),19 }, + { IPv4(196,10,192,0),21 }, + { IPv4(196,10,203,0),24 }, + { IPv4(196,10,204,0),22 }, + { IPv4(196,10,208,0),22 }, + { IPv4(196,10,212,0),23 }, + { IPv4(196,10,224,0),24 }, + { IPv4(196,10,229,0),24 }, + { IPv4(196,10,231,0),24 }, + { IPv4(196,10,248,0),23 }, + { IPv4(196,10,249,0),24 }, + { IPv4(196,10,251,0),24 }, + { IPv4(196,10,252,0),23 }, + { IPv4(196,10,254,0),24 }, + { IPv4(196,11,0,0),20 }, + { IPv4(196,11,16,0),21 }, + { IPv4(196,11,24,0),22 }, + { IPv4(196,11,28,0),23 }, + { IPv4(196,11,30,0),24 }, + { IPv4(196,11,40,0),21 }, + { IPv4(196,11,57,0),24 }, + { IPv4(196,11,58,0),24 }, + { IPv4(196,11,61,0),24 }, + { IPv4(196,11,69,0),24 }, + { IPv4(196,11,70,0),24 }, + { IPv4(196,11,72,0),24 }, + { IPv4(196,11,98,0),24 }, + { IPv4(196,11,109,0),24 }, + { IPv4(196,11,110,0),23 }, + { IPv4(196,11,112,0),23 }, + { IPv4(196,11,114,0),24 }, + { IPv4(196,11,116,0),24 }, + { IPv4(196,11,122,0),24 }, + { IPv4(196,11,124,0),24 }, + { IPv4(196,11,127,0),24 }, + { IPv4(196,11,134,0),24 }, + { IPv4(196,11,135,0),24 }, + { IPv4(196,11,136,0),21 }, + { IPv4(196,11,144,0),23 }, + { IPv4(196,11,146,0),23 }, + { IPv4(196,11,148,0),23 }, + { IPv4(196,11,170,0),23 }, + { IPv4(196,11,172,0),23 }, + { IPv4(196,11,174,0),24 }, + { IPv4(196,11,188,0),23 }, + { IPv4(196,11,188,0),24 }, + { IPv4(196,11,190,0),24 }, + { IPv4(196,11,192,0),22 }, + { IPv4(196,11,196,0),24 }, + { IPv4(196,11,197,0),24 }, + { IPv4(196,11,200,0),22 }, + { IPv4(196,11,204,0),24 }, + { IPv4(196,11,205,0),24 }, + { IPv4(196,11,209,0),24 }, + { IPv4(196,11,233,0),24 }, + { IPv4(196,11,235,0),24 }, + { IPv4(196,11,239,0),24 }, + { IPv4(196,11,240,0),23 }, + { IPv4(196,11,243,0),24 }, + { IPv4(196,11,244,0),23 }, + { IPv4(196,11,251,0),24 }, + { IPv4(196,12,16,0),24 }, + { IPv4(196,12,160,0),24 }, + { IPv4(196,12,161,0),24 }, + { IPv4(196,12,162,0),24 }, + { IPv4(196,12,163,0),24 }, + { IPv4(196,12,164,0),24 }, + { IPv4(196,12,165,0),24 }, + { IPv4(196,12,166,0),24 }, + { IPv4(196,12,167,0),24 }, + { IPv4(196,12,168,0),24 }, + { IPv4(196,12,169,0),24 }, + { IPv4(196,12,170,0),24 }, + { IPv4(196,12,171,0),24 }, + { IPv4(196,12,172,0),24 }, + { IPv4(196,12,173,0),24 }, + { IPv4(196,12,174,0),24 }, + { IPv4(196,12,175,0),24 }, + { IPv4(196,12,176,0),24 }, + { IPv4(196,12,177,0),24 }, + { IPv4(196,12,178,0),24 }, + { IPv4(196,12,179,0),24 }, + { IPv4(196,12,180,0),24 }, + { IPv4(196,12,181,0),24 }, + { IPv4(196,12,182,0),24 }, + { IPv4(196,12,183,0),24 }, + { IPv4(196,12,184,0),24 }, + { IPv4(196,12,185,0),24 }, + { IPv4(196,12,186,0),24 }, + { IPv4(196,12,187,0),24 }, + { IPv4(196,12,188,0),24 }, + { IPv4(196,12,189,0),24 }, + { IPv4(196,12,190,0),24 }, + { IPv4(196,12,191,0),24 }, + { IPv4(196,13,1,0),24 }, + { IPv4(196,13,2,0),23 }, + { IPv4(196,13,4,0),22 }, + { IPv4(196,13,8,0),21 }, + { IPv4(196,13,16,0),21 }, + { IPv4(196,13,24,0),22 }, + { IPv4(196,13,28,0),23 }, + { IPv4(196,13,30,0),24 }, + { IPv4(196,13,31,0),24 }, + { IPv4(196,13,32,0),19 }, + { IPv4(196,13,64,0),20 }, + { IPv4(196,13,80,0),24 }, + { IPv4(196,13,93,0),24 }, + { IPv4(196,13,97,0),24 }, + { IPv4(196,13,101,0),24 }, + { IPv4(196,13,102,0),23 }, + { IPv4(196,13,104,0),24 }, + { IPv4(196,13,108,0),24 }, + { IPv4(196,13,113,0),24 }, + { IPv4(196,13,115,0),24 }, + { IPv4(196,13,118,0),24 }, + { IPv4(196,13,121,0),24 }, + { IPv4(196,13,125,0),24 }, + { IPv4(196,13,126,0),24 }, + { IPv4(196,13,127,0),24 }, + { IPv4(196,13,128,0),22 }, + { IPv4(196,13,132,0),24 }, + { IPv4(196,13,138,0),24 }, + { 
IPv4(196,13,139,0),24 }, + { IPv4(196,13,140,0),22 }, + { IPv4(196,13,144,0),22 }, + { IPv4(196,13,150,0),24 }, + { IPv4(196,13,151,0),24 }, + { IPv4(196,13,152,0),21 }, + { IPv4(196,13,160,0),24 }, + { IPv4(196,13,163,0),24 }, + { IPv4(196,13,164,0),24 }, + { IPv4(196,13,165,0),24 }, + { IPv4(196,13,169,0),24 }, + { IPv4(196,13,174,0),23 }, + { IPv4(196,13,176,0),21 }, + { IPv4(196,13,184,0),23 }, + { IPv4(196,13,187,0),24 }, + { IPv4(196,13,188,0),22 }, + { IPv4(196,13,192,0),22 }, + { IPv4(196,13,196,0),24 }, + { IPv4(196,13,200,0),22 }, + { IPv4(196,13,204,0),24 }, + { IPv4(196,13,213,0),24 }, + { IPv4(196,13,214,0),23 }, + { IPv4(196,13,216,0),23 }, + { IPv4(196,13,225,0),24 }, + { IPv4(196,13,226,0),23 }, + { IPv4(196,13,228,0),22 }, + { IPv4(196,13,232,0),24 }, + { IPv4(196,13,252,0),22 }, + { IPv4(196,14,0,0),16 }, + { IPv4(196,15,0,0),21 }, + { IPv4(196,21,0,0),16 }, + { IPv4(196,22,0,0),22 }, + { IPv4(196,22,32,0),24 }, + { IPv4(196,22,160,0),19 }, + { IPv4(196,22,162,0),24 }, + { IPv4(196,22,166,0),24 }, + { IPv4(196,22,170,0),24 }, + { IPv4(196,22,176,0),24 }, + { IPv4(196,22,181,0),24 }, + { IPv4(196,22,182,0),24 }, + { IPv4(196,22,183,0),24 }, + { IPv4(196,22,189,0),24 }, + { IPv4(196,23,0,0),16 }, + { IPv4(196,24,0,0),16 }, + { IPv4(196,26,0,0),16 }, + { IPv4(196,27,12,0),24 }, + { IPv4(196,27,15,0),24 }, + { IPv4(196,27,19,0),24 }, + { IPv4(196,27,40,0),22 }, + { IPv4(196,27,48,0),22 }, + { IPv4(196,28,5,0),24 }, + { IPv4(196,28,8,0),24 }, + { IPv4(196,28,16,0),20 }, + { IPv4(196,28,32,0),20 }, + { IPv4(196,28,64,0),19 }, + { IPv4(196,28,96,0),19 }, + { IPv4(196,29,6,0),24 }, + { IPv4(196,29,32,0),24 }, + { IPv4(196,29,33,0),24 }, + { IPv4(196,29,34,0),24 }, + { IPv4(196,29,35,0),24 }, + { IPv4(196,29,36,0),24 }, + { IPv4(196,29,37,0),24 }, + { IPv4(196,29,38,0),24 }, + { IPv4(196,29,39,0),24 }, + { IPv4(196,32,0,0),21 }, + { IPv4(196,33,0,0),16 }, + { IPv4(196,34,0,0),15 }, + { IPv4(196,36,0,0),16 }, + { IPv4(196,37,0,0),16 }, + { IPv4(196,38,0,0),16 }, + { IPv4(196,39,0,0),17 }, + { IPv4(196,41,67,0),24 }, + { IPv4(196,41,128,0),19 }, + { IPv4(196,41,160,0),20 }, + { IPv4(196,41,171,0),24 }, + { IPv4(196,41,192,0),19 }, + { IPv4(196,43,0,0),18 }, + { IPv4(196,44,0,0),19 }, + { IPv4(198,1,32,0),20 }, + { IPv4(198,1,35,0),24 }, + { IPv4(198,1,36,0),23 }, + { IPv4(198,1,48,0),22 }, + { IPv4(198,3,16,0),20 }, + { IPv4(198,3,122,0),24 }, + { IPv4(198,3,124,0),24 }, + { IPv4(198,4,44,0),24 }, + { IPv4(198,4,64,0),20 }, + { IPv4(198,5,5,0),24 }, + { IPv4(198,5,6,0),24 }, + { IPv4(198,5,222,0),23 }, + { IPv4(198,6,80,0),24 }, + { IPv4(198,6,95,0),24 }, + { IPv4(198,6,196,0),24 }, + { IPv4(198,6,245,0),24 }, + { IPv4(198,6,255,0),24 }, + { IPv4(198,7,0,0),21 }, + { IPv4(198,7,128,0),18 }, + { IPv4(198,7,142,0),24 }, + { IPv4(198,8,16,0),20 }, + { IPv4(198,8,32,0),20 }, + { IPv4(198,8,48,0),21 }, + { IPv4(198,8,56,0),24 }, + { IPv4(198,8,64,0),22 }, + { IPv4(198,8,68,0),23 }, + { IPv4(198,9,0,0),16 }, + { IPv4(198,10,0,0),16 }, + { IPv4(198,11,16,0),20 }, + { IPv4(198,11,32,0),19 }, + { IPv4(198,11,57,0),24 }, + { IPv4(198,11,58,0),24 }, + { IPv4(198,14,32,0),19 }, + { IPv4(198,17,5,0),24 }, + { IPv4(198,17,37,0),24 }, + { IPv4(198,17,39,0),24 }, + { IPv4(198,17,40,0),24 }, + { IPv4(198,17,46,0),24 }, + { IPv4(198,17,47,0),24 }, + { IPv4(198,17,57,0),24 }, + { IPv4(198,17,59,0),24 }, + { IPv4(198,17,62,0),24 }, + { IPv4(198,17,81,0),24 }, + { IPv4(198,17,101,0),24 }, + { IPv4(198,17,107,0),24 }, + { IPv4(198,17,138,0),24 }, + { IPv4(198,17,144,0),24 }, + { IPv4(198,17,145,0),24 }, 
+ { IPv4(198,17,150,0),23 }, + { IPv4(198,17,169,0),24 }, + { IPv4(198,17,176,0),24 }, + { IPv4(198,17,183,0),24 }, + { IPv4(198,17,184,0),24 }, + { IPv4(198,17,184,0),23 }, + { IPv4(198,17,186,0),24 }, + { IPv4(198,17,189,0),24 }, + { IPv4(198,17,191,0),24 }, + { IPv4(198,17,192,0),23 }, + { IPv4(198,17,194,0),24 }, + { IPv4(198,17,200,0),24 }, + { IPv4(198,17,205,0),24 }, + { IPv4(198,17,235,0),24 }, + { IPv4(198,17,242,0),24 }, + { IPv4(198,17,243,0),24 }, + { IPv4(198,17,247,0),24 }, + { IPv4(198,17,249,0),24 }, + { IPv4(198,20,8,0),21 }, + { IPv4(198,22,5,0),24 }, + { IPv4(198,22,6,0),23 }, + { IPv4(198,22,8,0),23 }, + { IPv4(198,22,19,0),24 }, + { IPv4(198,22,28,0),24 }, + { IPv4(198,22,41,0),24 }, + { IPv4(198,22,62,0),23 }, + { IPv4(198,22,64,0),22 }, + { IPv4(198,22,99,0),24 }, + { IPv4(198,22,109,0),24 }, + { IPv4(198,22,110,0),24 }, + { IPv4(198,22,121,0),24 }, + { IPv4(198,22,129,0),24 }, + { IPv4(198,22,133,0),24 }, + { IPv4(198,22,137,0),24 }, + { IPv4(198,22,146,0),24 }, + { IPv4(198,22,176,0),24 }, + { IPv4(198,22,229,0),24 }, + { IPv4(198,22,230,0),24 }, + { IPv4(198,22,249,0),24 }, + { IPv4(198,24,6,0),24 }, + { IPv4(198,25,0,0),16 }, + { IPv4(198,25,24,0),24 }, + { IPv4(198,25,35,0),24 }, + { IPv4(198,25,42,0),24 }, + { IPv4(198,25,48,0),24 }, + { IPv4(198,25,50,0),24 }, + { IPv4(198,25,67,0),24 }, + { IPv4(198,25,72,0),24 }, + { IPv4(198,25,102,0),24 }, + { IPv4(198,25,141,0),24 }, + { IPv4(198,25,142,0),24 }, + { IPv4(198,25,143,0),24 }, + { IPv4(198,25,150,0),24 }, + { IPv4(198,25,190,0),24 }, + { IPv4(198,25,191,0),24 }, + { IPv4(198,25,192,0),24 }, + { IPv4(198,25,195,0),24 }, + { IPv4(198,25,197,0),24 }, + { IPv4(198,25,199,0),24 }, + { IPv4(198,25,202,0),24 }, + { IPv4(198,25,212,0),24 }, + { IPv4(198,25,230,0),24 }, + { IPv4(198,25,231,0),24 }, + { IPv4(198,25,232,0),24 }, + { IPv4(198,25,236,0),24 }, + { IPv4(198,25,237,0),24 }, + { IPv4(198,25,239,0),24 }, + { IPv4(198,25,240,0),24 }, + { IPv4(198,25,242,0),24 }, + { IPv4(198,25,243,0),24 }, + { IPv4(198,26,0,0),16 }, + { IPv4(198,26,118,0),24 }, + { IPv4(198,26,171,0),24 }, + { IPv4(198,26,172,0),24 }, + { IPv4(198,26,173,0),24 }, + { IPv4(198,26,174,0),24 }, + { IPv4(198,26,175,0),24 }, + { IPv4(198,26,177,0),24 }, + { IPv4(198,26,178,0),24 }, + { IPv4(198,26,180,0),24 }, + { IPv4(198,26,181,0),24 }, + { IPv4(198,26,182,0),24 }, + { IPv4(198,26,183,0),24 }, + { IPv4(198,26,186,0),24 }, + { IPv4(198,26,187,0),24 }, + { IPv4(198,26,188,0),24 }, + { IPv4(198,26,190,0),24 }, + { IPv4(198,26,192,0),24 }, + { IPv4(198,26,199,0),24 }, + { IPv4(198,26,227,0),24 }, + { IPv4(198,27,18,0),24 }, + { IPv4(198,27,24,0),24 }, + { IPv4(198,27,38,0),24 }, + { IPv4(198,27,47,0),24 }, + { IPv4(198,27,48,0),24 }, + { IPv4(198,27,54,0),24 }, + { IPv4(198,28,128,0),24 }, + { IPv4(198,29,0,0),22 }, + { IPv4(198,31,9,0),24 }, + { IPv4(198,31,31,0),24 }, + { IPv4(198,31,158,0),23 }, + { IPv4(198,31,232,0),23 }, + { IPv4(198,31,238,0),24 }, + { IPv4(198,31,238,0),23 }, + { IPv4(198,31,239,0),24 }, + { IPv4(198,32,42,0),24 }, + { IPv4(198,32,64,0),24 }, + { IPv4(198,32,114,0),24 }, + { IPv4(198,32,128,0),24 }, + { IPv4(198,32,136,0),24 }, + { IPv4(198,32,139,0),24 }, + { IPv4(198,32,176,0),24 }, + { IPv4(198,32,177,0),24 }, + { IPv4(198,32,184,0),24 }, + { IPv4(198,32,200,0),24 }, + { IPv4(198,32,212,0),24 }, + { IPv4(198,32,216,0),24 }, + { IPv4(198,32,220,0),24 }, + { IPv4(198,32,224,0),24 }, + { IPv4(198,32,248,0),24 }, + { IPv4(198,32,249,0),24 }, + { IPv4(198,32,251,0),24 }, + { IPv4(198,34,224,0),21 }, + { IPv4(198,35,1,0),24 }, 
+ { IPv4(198,35,2,0),24 }, + { IPv4(198,35,3,0),24 }, + { IPv4(198,35,4,0),24 }, + { IPv4(198,35,5,0),24 }, + { IPv4(198,35,6,0),24 }, + { IPv4(198,35,7,0),24 }, + { IPv4(198,35,8,0),24 }, + { IPv4(198,35,9,0),24 }, + { IPv4(198,35,10,0),24 }, + { IPv4(198,35,11,0),24 }, + { IPv4(198,35,12,0),24 }, + { IPv4(198,35,13,0),24 }, + { IPv4(198,35,14,0),24 }, + { IPv4(198,35,15,0),24 }, + { IPv4(198,35,128,0),24 }, + { IPv4(198,36,16,0),21 }, + { IPv4(198,36,24,0),22 }, + { IPv4(198,36,180,0),23 }, + { IPv4(198,36,190,0),24 }, + { IPv4(198,37,16,0),21 }, + { IPv4(198,37,24,0),22 }, + { IPv4(198,38,8,0),22 }, + { IPv4(198,38,12,0),24 }, + { IPv4(198,40,16,0),21 }, + { IPv4(198,40,24,0),22 }, + { IPv4(198,40,28,0),23 }, + { IPv4(198,40,30,0),24 }, + { IPv4(198,41,0,0),24 }, + { IPv4(198,41,1,0),24 }, + { IPv4(198,41,3,0),24 }, + { IPv4(198,41,6,0),24 }, + { IPv4(198,41,8,0),24 }, + { IPv4(198,41,9,0),24 }, + { IPv4(198,41,10,0),24 }, + { IPv4(198,41,11,0),24 }, + { IPv4(198,43,100,0),24 }, + { IPv4(198,43,237,0),24 }, + { IPv4(198,45,18,0),24 }, + { IPv4(198,45,20,0),24 }, + { IPv4(198,45,22,0),24 }, + { IPv4(198,45,23,0),24 }, + { IPv4(198,45,24,0),24 }, + { IPv4(198,46,0,0),21 }, + { IPv4(198,46,8,0),24 }, + { IPv4(198,46,9,0),24 }, + { IPv4(198,46,75,0),24 }, + { IPv4(198,48,16,0),23 }, + { IPv4(198,48,16,0),24 }, + { IPv4(198,48,17,0),24 }, + { IPv4(198,49,22,0),24 }, + { IPv4(198,49,45,0),24 }, + { IPv4(198,49,92,0),23 }, + { IPv4(198,49,103,0),24 }, + { IPv4(198,49,104,0),24 }, + { IPv4(198,49,114,0),24 }, + { IPv4(198,49,120,0),22 }, + { IPv4(198,49,143,0),24 }, + { IPv4(198,49,144,0),23 }, + { IPv4(198,49,168,0),23 }, + { IPv4(198,49,174,0),24 }, + { IPv4(198,49,182,0),24 }, + { IPv4(198,49,183,0),24 }, + { IPv4(198,49,184,0),21 }, + { IPv4(198,49,192,0),24 }, + { IPv4(198,49,205,0),24 }, + { IPv4(198,49,206,0),24 }, + { IPv4(198,49,207,0),24 }, + { IPv4(198,49,208,0),24 }, + { IPv4(198,49,224,0),21 }, + { IPv4(198,49,232,0),22 }, + { IPv4(198,49,236,0),24 }, + { IPv4(198,49,239,0),24 }, + { IPv4(198,49,240,0),24 }, + { IPv4(198,49,241,0),24 }, + { IPv4(198,50,1,0),24 }, + { IPv4(198,50,7,0),24 }, + { IPv4(198,50,9,0),24 }, + { IPv4(198,51,13,0),24 }, + { IPv4(198,51,14,0),24 }, + { IPv4(198,51,35,0),24 }, + { IPv4(198,51,72,0),24 }, + { IPv4(198,51,90,0),24 }, + { IPv4(198,51,93,0),24 }, + { IPv4(198,51,94,0),24 }, + { IPv4(198,51,109,0),24 }, + { IPv4(198,51,141,0),24 }, + { IPv4(198,51,170,0),23 }, + { IPv4(198,51,173,0),24 }, + { IPv4(198,51,177,0),24 }, + { IPv4(198,51,178,0),24 }, + { IPv4(198,51,184,0),23 }, + { IPv4(198,51,191,0),24 }, + { IPv4(198,51,192,0),24 }, + { IPv4(198,51,193,0),24 }, + { IPv4(198,51,209,0),24 }, + { IPv4(198,51,210,0),24 }, + { IPv4(198,51,214,0),24 }, + { IPv4(198,51,215,0),24 }, + { IPv4(198,51,234,0),24 }, + { IPv4(198,51,237,0),24 }, + { IPv4(198,51,238,0),24 }, + { IPv4(198,51,239,0),24 }, + { IPv4(198,51,241,0),24 }, + { IPv4(198,52,0,0),22 }, + { IPv4(198,53,26,0),23 }, + { IPv4(198,53,26,0),24 }, + { IPv4(198,54,20,0),22 }, + { IPv4(198,54,24,0),21 }, + { IPv4(198,54,32,0),21 }, + { IPv4(198,54,40,0),23 }, + { IPv4(198,54,58,0),24 }, + { IPv4(198,54,64,0),24 }, + { IPv4(198,54,65,0),24 }, + { IPv4(198,54,66,0),24 }, + { IPv4(198,54,68,0),24 }, + { IPv4(198,54,71,0),24 }, + { IPv4(198,54,72,0),22 }, + { IPv4(198,54,80,0),24 }, + { IPv4(198,54,82,0),24 }, + { IPv4(198,54,83,0),24 }, + { IPv4(198,54,84,0),24 }, + { IPv4(198,54,90,0),23 }, + { IPv4(198,54,92,0),24 }, + { IPv4(198,54,149,0),24 }, + { IPv4(198,54,154,0),23 }, + { IPv4(198,54,155,0),24 }, 
+ { IPv4(198,54,156,0),22 }, + { IPv4(198,54,160,0),23 }, + { IPv4(198,54,162,0),24 }, + { IPv4(198,54,163,0),24 }, + { IPv4(198,54,165,0),24 }, + { IPv4(198,54,170,0),24 }, + { IPv4(198,54,173,0),24 }, + { IPv4(198,54,174,0),24 }, + { IPv4(198,54,184,0),22 }, + { IPv4(198,54,188,0),23 }, + { IPv4(198,54,192,0),22 }, + { IPv4(198,54,196,0),24 }, + { IPv4(198,54,202,0),24 }, + { IPv4(198,54,219,0),24 }, + { IPv4(198,54,222,0),23 }, + { IPv4(198,54,225,0),24 }, + { IPv4(198,54,234,0),24 }, + { IPv4(198,54,253,0),24 }, + { IPv4(198,55,4,0),24 }, + { IPv4(198,55,8,0),21 }, + { IPv4(198,55,64,0),20 }, + { IPv4(198,55,69,0),24 }, + { IPv4(198,55,80,0),21 }, + { IPv4(198,55,84,0),24 }, + { IPv4(198,55,85,0),24 }, + { IPv4(198,55,86,0),24 }, + { IPv4(198,55,87,0),24 }, + { IPv4(198,55,88,0),22 }, + { IPv4(198,55,89,0),24 }, + { IPv4(198,55,92,0),23 }, + { IPv4(198,55,93,0),24 }, + { IPv4(198,56,0,0),21 }, + { IPv4(198,57,64,0),20 }, + { IPv4(198,58,0,0),24 }, + { IPv4(198,58,1,0),24 }, + { IPv4(198,58,16,0),22 }, + { IPv4(198,58,16,0),21 }, + { IPv4(198,58,20,0),22 }, + { IPv4(198,58,24,0),22 }, + { IPv4(198,58,24,0),23 }, + { IPv4(198,58,26,0),23 }, + { IPv4(198,58,37,0),24 }, + { IPv4(198,58,38,0),24 }, + { IPv4(198,58,64,0),21 }, + { IPv4(198,58,64,0),22 }, + { IPv4(198,58,68,0),22 }, + { IPv4(198,58,70,0),24 }, + { IPv4(198,58,71,0),24 }, + { IPv4(198,59,0,0),18 }, + { IPv4(198,59,2,0),24 }, + { IPv4(198,59,7,0),24 }, + { IPv4(198,59,36,0),24 }, + { IPv4(198,59,40,0),24 }, + { IPv4(198,59,46,0),24 }, + { IPv4(198,59,47,0),24 }, + { IPv4(198,59,48,0),24 }, + { IPv4(198,59,49,0),24 }, + { IPv4(198,59,54,0),24 }, + { IPv4(198,59,55,0),24 }, + { IPv4(198,59,59,0),24 }, + { IPv4(198,59,61,0),24 }, + { IPv4(198,59,64,0),19 }, + { IPv4(198,59,69,0),24 }, + { IPv4(198,59,70,0),24 }, + { IPv4(198,59,81,0),24 }, + { IPv4(198,59,82,0),24 }, + { IPv4(198,59,83,0),24 }, + { IPv4(198,59,87,0),24 }, + { IPv4(198,59,89,0),24 }, + { IPv4(198,59,93,0),24 }, + { IPv4(198,60,0,0),18 }, + { IPv4(198,60,1,0),24 }, + { IPv4(198,60,3,0),24 }, + { IPv4(198,60,4,0),24 }, + { IPv4(198,60,5,0),24 }, + { IPv4(198,60,9,0),24 }, + { IPv4(198,60,17,0),24 }, + { IPv4(198,60,22,0),24 }, + { IPv4(198,60,64,0),19 }, + { IPv4(198,60,72,0),22 }, + { IPv4(198,60,80,0),23 }, + { IPv4(198,60,82,0),24 }, + { IPv4(198,60,84,0),24 }, + { IPv4(198,60,85,0),24 }, + { IPv4(198,60,86,0),23 }, + { IPv4(198,60,88,0),22 }, + { IPv4(198,60,92,0),24 }, + { IPv4(198,60,93,0),24 }, + { IPv4(198,60,94,0),24 }, + { IPv4(198,60,95,0),24 }, + { IPv4(198,60,96,0),20 }, + { IPv4(198,60,96,0),24 }, + { IPv4(198,60,97,0),24 }, + { IPv4(198,60,98,0),24 }, + { IPv4(198,60,99,0),24 }, + { IPv4(198,60,100,0),24 }, + { IPv4(198,60,101,0),24 }, + { IPv4(198,60,102,0),24 }, + { IPv4(198,60,103,0),24 }, + { IPv4(198,60,104,0),24 }, + { IPv4(198,60,105,0),24 }, + { IPv4(198,60,109,0),24 }, + { IPv4(198,60,110,0),24 }, + { IPv4(198,60,112,0),21 }, + { IPv4(198,60,114,0),24 }, + { IPv4(198,60,121,0),24 }, + { IPv4(198,60,122,0),23 }, + { IPv4(198,60,124,0),22 }, + { IPv4(198,60,129,0),24 }, + { IPv4(198,60,132,0),24 }, + { IPv4(198,60,143,0),24 }, + { IPv4(198,60,144,0),20 }, + { IPv4(198,60,148,0),24 }, + { IPv4(198,60,152,0),24 }, + { IPv4(198,60,159,0),24 }, + { IPv4(198,60,160,0),19 }, + { IPv4(198,60,183,0),24 }, + { IPv4(198,60,186,0),24 }, + { IPv4(198,60,195,0),24 }, + { IPv4(198,60,217,0),24 }, + { IPv4(198,60,218,0),23 }, + { IPv4(198,60,220,0),22 }, + { IPv4(198,60,224,0),22 }, + { IPv4(198,60,251,0),24 }, + { IPv4(198,61,16,0),20 }, + { 
IPv4(198,62,8,0),24 }, + { IPv4(198,62,9,0),24 }, + { IPv4(198,62,10,0),24 }, + { IPv4(198,62,11,0),24 }, + { IPv4(198,62,64,0),24 }, + { IPv4(198,62,65,0),24 }, + { IPv4(198,62,66,0),24 }, + { IPv4(198,62,106,0),24 }, + { IPv4(198,62,112,0),24 }, + { IPv4(198,62,120,0),23 }, + { IPv4(198,62,142,0),24 }, + { IPv4(198,62,155,0),24 }, + { IPv4(198,62,160,0),24 }, + { IPv4(198,62,186,0),24 }, + { IPv4(198,62,187,0),24 }, + { IPv4(198,62,198,0),24 }, + { IPv4(198,62,205,0),24 }, + { IPv4(198,62,209,0),24 }, + { IPv4(198,62,210,0),24 }, + { IPv4(198,62,212,0),24 }, + { IPv4(198,62,230,0),24 }, + { IPv4(198,62,231,0),24 }, + { IPv4(198,62,232,0),24 }, + { IPv4(198,62,233,0),24 }, + { IPv4(198,62,242,0),24 }, + { IPv4(198,62,246,0),23 }, + { IPv4(198,62,248,0),23 }, + { IPv4(198,62,250,0),24 }, + { IPv4(198,63,0,0),16 }, + { IPv4(198,63,24,0),24 }, + { IPv4(198,63,193,0),24 }, + { IPv4(198,63,227,0),24 }, + { IPv4(198,64,0,0),15 }, + { IPv4(198,64,127,0),24 }, + { IPv4(198,65,199,0),24 }, + { IPv4(198,66,0,0),16 }, + { IPv4(198,67,15,0),24 }, + { IPv4(198,67,33,0),24 }, + { IPv4(198,67,38,0),24 }, + { IPv4(198,68,64,0),18 }, + { IPv4(198,68,128,0),20 }, + { IPv4(198,68,144,0),20 }, + { IPv4(198,68,164,0),22 }, + { IPv4(198,68,168,0),21 }, + { IPv4(198,68,181,0),24 }, + { IPv4(198,68,193,0),24 }, + { IPv4(198,68,224,0),20 }, + { IPv4(198,69,2,0),23 }, + { IPv4(198,69,26,0),24 }, + { IPv4(198,69,80,0),23 }, + { IPv4(198,69,82,0),24 }, + { IPv4(198,69,84,0),22 }, + { IPv4(198,69,88,0),21 }, + { IPv4(198,69,90,0),24 }, + { IPv4(198,69,131,0),24 }, + { IPv4(198,69,134,0),24 }, + { IPv4(198,69,184,0),23 }, + { IPv4(198,69,186,0),23 }, + { IPv4(198,69,188,0),22 }, + { IPv4(198,69,191,0),24 }, + { IPv4(198,70,176,0),20 }, + { IPv4(198,70,195,0),24 }, + { IPv4(198,70,196,0),22 }, + { IPv4(198,70,209,0),24 }, + { IPv4(198,70,220,0),23 }, + { IPv4(198,70,222,0),23 }, + { IPv4(198,70,224,0),20 }, + { IPv4(198,70,240,0),23 }, + { IPv4(198,70,242,0),24 }, + { IPv4(198,70,243,0),24 }, + { IPv4(198,70,244,0),23 }, + { IPv4(198,72,0,0),22 }, + { IPv4(198,72,5,0),24 }, + { IPv4(198,72,8,0),22 }, + { IPv4(198,72,12,0),24 }, + { IPv4(198,72,32,0),21 }, + { IPv4(198,72,40,0),23 }, + { IPv4(198,72,64,0),21 }, + { IPv4(198,72,72,0),22 }, + { IPv4(198,73,137,0),24 }, + { IPv4(198,73,138,0),24 }, + { IPv4(198,73,139,0),24 }, + { IPv4(198,73,176,0),24 }, + { IPv4(198,73,190,0),24 }, + { IPv4(198,73,248,0),24 }, + { IPv4(198,73,249,0),24 }, + { IPv4(198,73,252,0),24 }, + { IPv4(198,73,253,0),24 }, + { IPv4(198,74,16,0),24 }, + { IPv4(198,74,18,0),24 }, + { IPv4(198,74,20,0),24 }, + { IPv4(198,74,22,0),24 }, + { IPv4(198,74,24,0),24 }, + { IPv4(198,74,25,0),24 }, + { IPv4(198,74,26,0),24 }, + { IPv4(198,74,32,0),21 }, + { IPv4(198,74,40,0),23 }, + { IPv4(198,76,23,0),24 }, + { IPv4(198,76,29,0),24 }, + { IPv4(198,76,30,0),24 }, + { IPv4(198,76,31,0),24 }, + { IPv4(198,76,126,0),23 }, + { IPv4(198,76,162,0),24 }, + { IPv4(198,76,176,0),23 }, + { IPv4(198,76,178,0),24 }, + { IPv4(198,77,0,0),18 }, + { IPv4(198,77,32,0),23 }, + { IPv4(198,77,54,0),23 }, + { IPv4(198,77,56,0),22 }, + { IPv4(198,77,66,0),24 }, + { IPv4(198,77,86,0),23 }, + { IPv4(198,77,104,0),24 }, + { IPv4(198,77,105,0),24 }, + { IPv4(198,77,106,0),24 }, + { IPv4(198,77,110,0),24 }, + { IPv4(198,77,112,0),22 }, + { IPv4(198,77,136,0),24 }, + { IPv4(198,77,248,0),23 }, + { IPv4(198,78,8,0),21 }, + { IPv4(198,78,80,0),20 }, + { IPv4(198,78,96,0),20 }, + { IPv4(198,78,137,0),24 }, + { IPv4(198,78,138,0),24 }, + { IPv4(198,78,224,0),20 }, + { IPv4(198,79,24,0),22 
}, + { IPv4(198,79,88,0),21 }, + { IPv4(198,80,0,0),23 }, + { IPv4(198,80,15,0),24 }, + { IPv4(198,80,20,0),24 }, + { IPv4(198,80,56,0),24 }, + { IPv4(198,80,57,0),24 }, + { IPv4(198,80,58,0),24 }, + { IPv4(198,80,59,0),24 }, + { IPv4(198,80,68,0),22 }, + { IPv4(198,80,88,0),21 }, + { IPv4(198,80,129,0),24 }, + { IPv4(198,80,130,0),24 }, + { IPv4(198,80,132,0),24 }, + { IPv4(198,80,135,0),24 }, + { IPv4(198,80,136,0),24 }, + { IPv4(198,80,137,0),24 }, + { IPv4(198,80,138,0),24 }, + { IPv4(198,80,139,0),24 }, + { IPv4(198,80,140,0),24 }, + { IPv4(198,80,142,0),24 }, + { IPv4(198,80,143,0),24 }, + { IPv4(198,80,144,0),24 }, + { IPv4(198,80,145,0),24 }, + { IPv4(198,80,146,0),24 }, + { IPv4(198,80,151,0),24 }, + { IPv4(198,80,152,0),24 }, + { IPv4(198,80,153,0),24 }, + { IPv4(198,80,155,0),24 }, + { IPv4(198,80,156,0),24 }, + { IPv4(198,80,157,0),24 }, + { IPv4(198,80,159,0),24 }, + { IPv4(198,80,160,0),24 }, + { IPv4(198,80,162,0),24 }, + { IPv4(198,80,164,0),24 }, + { IPv4(198,80,165,0),24 }, + { IPv4(198,80,167,0),24 }, + { IPv4(198,80,169,0),24 }, + { IPv4(198,80,170,0),24 }, + { IPv4(198,80,171,0),24 }, + { IPv4(198,80,172,0),24 }, + { IPv4(198,80,173,0),24 }, + { IPv4(198,80,174,0),24 }, + { IPv4(198,80,178,0),24 }, + { IPv4(198,80,180,0),24 }, + { IPv4(198,80,182,0),24 }, + { IPv4(198,80,183,0),24 }, + { IPv4(198,80,185,0),24 }, + { IPv4(198,80,186,0),24 }, + { IPv4(198,80,187,0),24 }, + { IPv4(198,80,189,0),24 }, + { IPv4(198,80,191,0),24 }, + { IPv4(198,81,0,0),19 }, + { IPv4(198,81,4,0),22 }, + { IPv4(198,81,16,0),20 }, + { IPv4(198,81,200,0),24 }, + { IPv4(198,81,230,0),24 }, + { IPv4(198,81,240,0),24 }, + { IPv4(198,83,19,0),24 }, + { IPv4(198,83,28,0),22 }, + { IPv4(198,83,112,0),20 }, + { IPv4(198,83,130,0),24 }, + { IPv4(198,84,16,0),20 }, + { IPv4(198,84,51,0),24 }, + { IPv4(198,84,52,0),24 }, + { IPv4(198,85,74,0),23 }, + { IPv4(198,85,116,0),24 }, + { IPv4(198,87,0,0),16 }, + { IPv4(198,88,0,0),16 }, + { IPv4(198,89,35,0),24 }, + { IPv4(198,89,36,0),24 }, + { IPv4(198,89,37,0),24 }, + { IPv4(198,89,138,0),24 }, + { IPv4(198,89,159,0),24 }, + { IPv4(198,89,160,0),24 }, + { IPv4(198,91,64,0),24 }, + { IPv4(198,91,65,0),24 }, + { IPv4(198,91,66,0),24 }, + { IPv4(198,91,67,0),24 }, + { IPv4(198,91,70,0),24 }, + { IPv4(198,91,71,0),24 }, + { IPv4(198,91,73,0),24 }, + { IPv4(198,92,64,0),22 }, + { IPv4(198,92,104,0),21 }, + { IPv4(198,92,156,0),23 }, + { IPv4(198,92,208,0),22 }, + { IPv4(198,93,92,0),22 }, + { IPv4(198,93,108,0),24 }, + { IPv4(198,93,109,0),24 }, + { IPv4(198,93,110,0),24 }, + { IPv4(198,93,111,0),24 }, + { IPv4(198,93,134,0),23 }, + { IPv4(198,93,136,0),22 }, + { IPv4(198,94,128,0),21 }, + { IPv4(198,95,8,0),23 }, + { IPv4(198,95,10,0),24 }, + { IPv4(198,95,64,0),21 }, + { IPv4(198,95,248,0),22 }, + { IPv4(198,96,2,0),24 }, + { IPv4(198,96,3,0),24 }, + { IPv4(198,96,18,0),23 }, + { IPv4(198,96,46,0),23 }, + { IPv4(198,96,48,0),21 }, + { IPv4(198,96,56,0),22 }, + { IPv4(198,96,60,0),24 }, + { IPv4(198,96,62,0),24 }, + { IPv4(198,96,80,0),22 }, + { IPv4(198,96,113,0),24 }, + { IPv4(198,96,127,0),24 }, + { IPv4(198,96,131,0),24 }, + { IPv4(198,96,185,0),24 }, + { IPv4(198,96,188,0),24 }, + { IPv4(198,96,199,0),24 }, + { IPv4(198,96,223,0),24 }, + { IPv4(198,96,251,0),24 }, + { IPv4(198,97,44,0),24 }, + { IPv4(198,97,52,0),23 }, + { IPv4(198,97,67,0),24 }, + { IPv4(198,97,70,0),23 }, + { IPv4(198,97,70,0),24 }, + { IPv4(198,97,72,0),21 }, + { IPv4(198,97,72,0),24 }, + { IPv4(198,97,79,0),24 }, + { IPv4(198,97,80,0),24 }, + { IPv4(198,97,80,0),20 }, + { 
IPv4(198,97,81,0),24 }, + { IPv4(198,97,82,0),23 }, + { IPv4(198,97,84,0),22 }, + { IPv4(198,97,88,0),24 }, + { IPv4(198,97,93,0),24 }, + { IPv4(198,97,96,0),19 }, + { IPv4(198,97,108,0),24 }, + { IPv4(198,97,110,0),24 }, + { IPv4(198,97,128,0),18 }, + { IPv4(198,97,135,0),24 }, + { IPv4(198,97,138,0),24 }, + { IPv4(198,97,143,0),24 }, + { IPv4(198,97,144,0),24 }, + { IPv4(198,97,151,0),24 }, + { IPv4(198,97,155,0),24 }, + { IPv4(198,97,192,0),20 }, + { IPv4(198,97,208,0),24 }, + { IPv4(198,97,208,0),23 }, + { IPv4(198,97,209,0),24 }, + { IPv4(198,97,234,0),23 }, + { IPv4(198,97,236,0),24 }, + { IPv4(198,97,240,0),20 }, + { IPv4(198,98,83,0),24 }, + { IPv4(198,99,85,0),24 }, + { IPv4(198,99,88,0),24 }, + { IPv4(198,99,89,0),24 }, + { IPv4(198,99,90,0),24 }, + { IPv4(198,99,106,0),24 }, + { IPv4(198,99,107,0),24 }, + { IPv4(198,99,108,0),24 }, + { IPv4(198,99,110,0),24 }, + { IPv4(198,99,115,0),24 }, + { IPv4(198,99,146,0),24 }, + { IPv4(198,99,191,0),24 }, + { IPv4(198,99,201,0),24 }, + { IPv4(198,99,225,0),24 }, + { IPv4(198,99,239,0),24 }, + { IPv4(198,99,244,0),24 }, + { IPv4(198,101,4,0),22 }, + { IPv4(198,101,23,0),24 }, + { IPv4(198,101,24,0),24 }, + { IPv4(198,101,32,0),20 }, + { IPv4(198,102,1,0),24 }, + { IPv4(198,102,2,0),23 }, + { IPv4(198,102,66,0),24 }, + { IPv4(198,102,67,0),24 }, + { IPv4(198,102,85,0),24 }, + { IPv4(198,102,86,0),23 }, + { IPv4(198,102,88,0),24 }, + { IPv4(198,102,91,0),24 }, + { IPv4(198,102,103,0),24 }, + { IPv4(198,102,112,0),24 }, + { IPv4(198,102,117,0),24 }, + { IPv4(198,102,147,0),24 }, + { IPv4(198,102,157,0),24 }, + { IPv4(198,102,172,0),24 }, + { IPv4(198,102,186,0),23 }, + { IPv4(198,102,188,0),23 }, + { IPv4(198,102,188,0),22 }, + { IPv4(198,102,190,0),23 }, + { IPv4(198,102,192,0),23 }, + { IPv4(198,102,192,0),22 }, + { IPv4(198,102,194,0),23 }, + { IPv4(198,102,196,0),24 }, + { IPv4(198,102,196,0),23 }, + { IPv4(198,102,198,0),23 }, + { IPv4(198,102,201,0),24 }, + { IPv4(198,102,206,0),24 }, + { IPv4(198,102,211,0),24 }, + { IPv4(198,102,244,0),24 }, + { IPv4(198,102,253,0),24 }, + { IPv4(198,102,254,0),24 }, + { IPv4(198,103,0,0),16 }, + { IPv4(198,103,1,0),24 }, + { IPv4(198,103,15,0),24 }, + { IPv4(198,103,18,0),24 }, + { IPv4(198,103,22,0),24 }, + { IPv4(198,103,37,0),24 }, + { IPv4(198,103,41,0),24 }, + { IPv4(198,103,42,0),24 }, + { IPv4(198,103,45,0),24 }, + { IPv4(198,103,49,0),24 }, + { IPv4(198,103,53,0),24 }, + { IPv4(198,103,55,0),24 }, + { IPv4(198,103,56,0),24 }, + { IPv4(198,103,61,0),24 }, + { IPv4(198,103,63,0),24 }, + { IPv4(198,103,92,0),24 }, + { IPv4(198,103,93,0),24 }, + { IPv4(198,103,94,0),24 }, + { IPv4(198,103,95,0),24 }, + { IPv4(198,103,96,0),24 }, + { IPv4(198,103,97,0),24 }, + { IPv4(198,103,98,0),24 }, + { IPv4(198,103,99,0),24 }, + { IPv4(198,103,101,0),24 }, + { IPv4(198,103,103,0),24 }, + { IPv4(198,103,104,0),24 }, + { IPv4(198,103,108,0),24 }, + { IPv4(198,103,109,0),24 }, + { IPv4(198,103,111,0),24 }, + { IPv4(198,103,138,0),24 }, + { IPv4(198,103,140,0),24 }, + { IPv4(198,103,143,0),24 }, + { IPv4(198,103,145,0),24 }, + { IPv4(198,103,146,0),24 }, + { IPv4(198,103,147,0),24 }, + { IPv4(198,103,152,0),24 }, + { IPv4(198,103,153,0),24 }, + { IPv4(198,103,154,0),24 }, + { IPv4(198,103,161,0),24 }, + { IPv4(198,103,162,0),24 }, + { IPv4(198,103,164,0),24 }, + { IPv4(198,103,167,0),24 }, + { IPv4(198,103,169,0),24 }, + { IPv4(198,103,171,0),24 }, + { IPv4(198,103,172,0),24 }, + { IPv4(198,103,174,0),24 }, + { IPv4(198,103,176,0),24 }, + { IPv4(198,103,177,0),24 }, + { IPv4(198,103,180,0),24 }, + { 
IPv4(198,103,185,0),24 }, + { IPv4(198,103,186,0),24 }, + { IPv4(198,103,191,0),24 }, + { IPv4(198,103,193,0),24 }, + { IPv4(198,103,194,0),24 }, + { IPv4(198,103,195,0),24 }, + { IPv4(198,103,196,0),24 }, + { IPv4(198,103,198,0),24 }, + { IPv4(198,103,206,0),24 }, + { IPv4(198,103,208,0),24 }, + { IPv4(198,103,211,0),24 }, + { IPv4(198,103,213,0),24 }, + { IPv4(198,103,214,0),24 }, + { IPv4(198,103,215,0),24 }, + { IPv4(198,103,216,0),24 }, + { IPv4(198,103,217,0),24 }, + { IPv4(198,103,218,0),24 }, + { IPv4(198,103,220,0),24 }, + { IPv4(198,103,222,0),24 }, + { IPv4(198,103,234,0),24 }, + { IPv4(198,103,235,0),24 }, + { IPv4(198,103,236,0),24 }, + { IPv4(198,103,237,0),24 }, + { IPv4(198,103,238,0),24 }, + { IPv4(198,103,241,0),24 }, + { IPv4(198,103,242,0),24 }, + { IPv4(198,103,244,0),24 }, + { IPv4(198,103,245,0),24 }, + { IPv4(198,103,248,0),24 }, + { IPv4(198,103,249,0),24 }, + { IPv4(198,103,250,0),24 }, + { IPv4(198,104,0,0),16 }, + { IPv4(198,105,2,0),24 }, + { IPv4(198,105,32,0),20 }, + { IPv4(198,105,64,0),20 }, + { IPv4(198,106,0,0),15 }, + { IPv4(198,108,16,0),22 }, + { IPv4(198,111,96,0),24 }, + { IPv4(198,112,169,0),24 }, + { IPv4(198,112,200,0),23 }, + { IPv4(198,113,60,0),24 }, + { IPv4(198,113,61,0),24 }, + { IPv4(198,116,0,0),14 }, + { IPv4(198,118,206,0),24 }, + { IPv4(198,119,23,0),24 }, + { IPv4(198,119,24,0),24 }, + { IPv4(198,119,27,0),25 }, + { IPv4(198,120,0,0),14 }, + { IPv4(198,124,0,0),14 }, + { IPv4(198,128,0,0),14 }, + { IPv4(198,133,16,0),24 }, + { IPv4(198,133,16,0),20 }, + { IPv4(198,133,36,0),24 }, + { IPv4(198,133,77,0),24 }, + { IPv4(198,133,79,0),24 }, + { IPv4(198,133,120,0),24 }, + { IPv4(198,133,123,0),24 }, + { IPv4(198,133,146,0),24 }, + { IPv4(198,133,170,0),24 }, + { IPv4(198,133,178,0),23 }, + { IPv4(198,133,180,0),22 }, + { IPv4(198,133,185,0),24 }, + { IPv4(198,133,198,0),24 }, + { IPv4(198,133,199,0),24 }, + { IPv4(198,133,206,0),24 }, + { IPv4(198,133,219,0),24 }, + { IPv4(198,133,233,0),24 }, + { IPv4(198,133,237,0),24 }, + { IPv4(198,133,242,0),24 }, + { IPv4(198,133,244,0),23 }, + { IPv4(198,133,246,0),24 }, + { IPv4(198,134,143,0),24 }, + { IPv4(198,134,148,0),24 }, + { IPv4(198,134,158,0),23 }, + { IPv4(198,134,196,0),24 }, + { IPv4(198,135,0,0),22 }, + { IPv4(198,135,4,0),22 }, + { IPv4(198,135,68,0),24 }, + { IPv4(198,135,78,0),24 }, + { IPv4(198,135,110,0),24 }, + { IPv4(198,135,118,0),23 }, + { IPv4(198,135,153,0),24 }, + { IPv4(198,135,222,0),24 }, + { IPv4(198,136,8,0),21 }, + { IPv4(198,136,139,0),24 }, + { IPv4(198,136,160,0),24 }, + { IPv4(198,136,186,0),24 }, + { IPv4(198,136,201,0),24 }, + { IPv4(198,136,226,0),24 }, + { IPv4(198,136,229,0),24 }, + { IPv4(198,136,233,0),24 }, + { IPv4(198,136,243,0),24 }, + { IPv4(198,136,250,0),24 }, + { IPv4(198,137,70,0),24 }, + { IPv4(198,137,99,0),24 }, + { IPv4(198,137,140,0),24 }, + { IPv4(198,137,142,0),24 }, + { IPv4(198,137,143,0),24 }, + { IPv4(198,137,147,0),24 }, + { IPv4(198,137,151,0),24 }, + { IPv4(198,137,152,0),23 }, + { IPv4(198,137,170,0),24 }, + { IPv4(198,137,181,0),24 }, + { IPv4(198,137,182,0),23 }, + { IPv4(198,137,186,0),24 }, + { IPv4(198,137,187,0),24 }, + { IPv4(198,137,194,0),24 }, + { IPv4(198,137,199,0),24 }, + { IPv4(198,137,200,0),24 }, + { IPv4(198,137,202,0),24 }, + { IPv4(198,137,221,0),24 }, + { IPv4(198,137,249,0),24 }, + { IPv4(198,137,254,0),24 }, + { IPv4(198,138,0,0),15 }, + { IPv4(198,138,53,0),24 }, + { IPv4(198,138,54,0),23 }, + { IPv4(198,138,56,0),22 }, + { IPv4(198,138,60,0),24 }, + { IPv4(198,138,103,0),24 }, + { IPv4(198,139,122,0),24 }, + 
{ IPv4(198,139,128,0),24 }, + { IPv4(198,139,237,0),24 }, + { IPv4(198,140,0,0),22 }, + { IPv4(198,140,58,0),23 }, + { IPv4(198,140,63,0),24 }, + { IPv4(198,140,134,0),24 }, + { IPv4(198,140,179,0),24 }, + { IPv4(198,140,180,0),24 }, + { IPv4(198,140,189,0),24 }, + { IPv4(198,140,215,0),24 }, + { IPv4(198,143,8,0),24 }, + { IPv4(198,143,13,0),24 }, + { IPv4(198,143,16,0),24 }, + { IPv4(198,143,17,0),24 }, + { IPv4(198,143,18,0),24 }, + { IPv4(198,143,19,0),24 }, + { IPv4(198,143,20,0),24 }, + { IPv4(198,143,21,0),24 }, + { IPv4(198,143,22,0),24 }, + { IPv4(198,143,24,0),24 }, + { IPv4(198,144,128,0),20 }, + { IPv4(198,144,135,0),24 }, + { IPv4(198,144,192,0),20 }, + { IPv4(198,147,0,0),20 }, + { IPv4(198,147,37,0),24 }, + { IPv4(198,147,38,0),24 }, + { IPv4(198,147,75,0),24 }, + { IPv4(198,147,81,0),24 }, + { IPv4(198,147,91,0),24 }, + { IPv4(198,147,128,0),24 }, + { IPv4(198,147,137,0),24 }, + { IPv4(198,147,142,0),23 }, + { IPv4(198,147,147,0),24 }, + { IPv4(198,147,150,0),24 }, + { IPv4(198,147,151,0),24 }, + { IPv4(198,147,157,0),24 }, + { IPv4(198,147,162,0),24 }, + { IPv4(198,147,175,0),24 }, + { IPv4(198,147,200,0),23 }, + { IPv4(198,147,219,0),24 }, + { IPv4(198,147,224,0),24 }, + { IPv4(198,147,246,0),24 }, + { IPv4(198,148,166,0),24 }, + { IPv4(198,148,175,0),24 }, + { IPv4(198,148,190,0),24 }, + { IPv4(198,148,205,0),24 }, + { IPv4(198,148,206,0),24 }, + { IPv4(198,148,209,0),24 }, + { IPv4(198,148,239,0),24 }, + { IPv4(198,148,251,0),24 }, + { IPv4(198,149,2,0),24 }, + { IPv4(198,149,172,0),22 }, + { IPv4(198,151,130,0),24 }, + { IPv4(198,151,137,0),24 }, + { IPv4(198,151,139,0),24 }, + { IPv4(198,151,149,0),24 }, + { IPv4(198,151,160,0),24 }, + { IPv4(198,151,170,0),24 }, + { IPv4(198,151,171,0),24 }, + { IPv4(198,151,172,0),24 }, + { IPv4(198,151,175,0),24 }, + { IPv4(198,151,200,0),22 }, + { IPv4(198,151,212,0),24 }, + { IPv4(198,151,230,0),23 }, + { IPv4(198,151,248,0),24 }, + { IPv4(198,152,185,0),24 }, + { IPv4(198,153,8,0),21 }, + { IPv4(198,153,20,0),22 }, + { IPv4(198,153,31,0),24 }, + { IPv4(198,153,132,0),24 }, + { IPv4(198,153,146,0),24 }, + { IPv4(198,153,152,0),24 }, + { IPv4(198,153,219,0),24 }, + { IPv4(198,153,232,0),24 }, + { IPv4(198,154,2,0),23 }, + { IPv4(198,154,8,0),21 }, + { IPv4(198,154,16,0),24 }, + { IPv4(198,154,18,0),24 }, + { IPv4(198,154,19,0),24 }, + { IPv4(198,154,20,0),24 }, + { IPv4(198,154,21,0),24 }, + { IPv4(198,154,22,0),24 }, + { IPv4(198,154,23,0),24 }, + { IPv4(198,154,24,0),23 }, + { IPv4(198,154,24,0),24 }, + { IPv4(198,154,25,0),24 }, + { IPv4(198,154,64,0),21 }, + { IPv4(198,154,72,0),22 }, + { IPv4(198,154,77,0),24 }, + { IPv4(198,154,128,0),19 }, + { IPv4(198,154,150,0),24 }, + { IPv4(198,154,160,0),20 }, + { IPv4(198,154,173,0),24 }, + { IPv4(198,154,174,0),23 }, + { IPv4(198,154,176,0),23 }, + { IPv4(198,155,0,0),16 }, + { IPv4(198,160,18,0),24 }, + { IPv4(198,160,140,0),24 }, + { IPv4(198,160,177,0),24 }, + { IPv4(198,160,178,0),24 }, + { IPv4(198,160,179,0),24 }, + { IPv4(198,160,180,0),24 }, + { IPv4(198,160,196,0),24 }, + { IPv4(198,160,197,0),24 }, + { IPv4(198,160,246,0),24 }, + { IPv4(198,160,250,0),24 }, + { IPv4(198,160,252,0),24 }, + { IPv4(198,161,2,0),24 }, + { IPv4(198,161,22,0),24 }, + { IPv4(198,161,23,0),24 }, + { IPv4(198,161,82,0),24 }, + { IPv4(198,161,83,0),24 }, + { IPv4(198,161,180,0),24 }, + { IPv4(198,161,208,0),24 }, + { IPv4(198,161,210,0),24 }, + { IPv4(198,161,211,0),24 }, + { IPv4(198,161,216,0),24 }, + { IPv4(198,161,246,0),23 }, + { IPv4(198,162,70,0),24 }, + { IPv4(198,162,158,0),23 }, + { 
IPv4(198,162,232,0),22 }, + { IPv4(198,163,115,0),24 }, + { IPv4(198,163,184,0),21 }, + { IPv4(198,163,192,0),21 }, + { IPv4(198,163,200,0),21 }, + { IPv4(198,164,3,0),24 }, + { IPv4(198,164,7,0),24 }, + { IPv4(198,165,18,0),24 }, + { IPv4(198,165,39,0),24 }, + { IPv4(198,165,53,0),24 }, + { IPv4(198,165,56,0),23 }, + { IPv4(198,165,59,0),24 }, + { IPv4(198,165,60,0),23 }, + { IPv4(198,165,62,0),24 }, + { IPv4(198,165,72,0),23 }, + { IPv4(198,165,162,0),23 }, + { IPv4(198,165,185,0),24 }, + { IPv4(198,167,160,0),24 }, + { IPv4(198,167,161,0),24 }, + { IPv4(198,167,162,0),24 }, + { IPv4(198,167,163,0),24 }, + { IPv4(198,169,171,0),24 }, + { IPv4(198,169,181,0),24 }, + { IPv4(198,169,182,0),24 }, + { IPv4(198,169,183,0),24 }, + { IPv4(198,169,184,0),24 }, + { IPv4(198,170,0,0),15 }, + { IPv4(198,170,186,0),24 }, + { IPv4(198,170,208,0),24 }, + { IPv4(198,172,0,0),15 }, + { IPv4(198,174,0,0),16 }, + { IPv4(198,174,1,0),24 }, + { IPv4(198,174,2,0),23 }, + { IPv4(198,174,4,0),23 }, + { IPv4(198,174,6,0),24 }, + { IPv4(198,174,8,0),24 }, + { IPv4(198,174,48,0),24 }, + { IPv4(198,174,49,0),24 }, + { IPv4(198,174,50,0),24 }, + { IPv4(198,174,51,0),24 }, + { IPv4(198,174,52,0),24 }, + { IPv4(198,174,55,0),24 }, + { IPv4(198,174,65,0),24 }, + { IPv4(198,174,66,0),23 }, + { IPv4(198,174,68,0),22 }, + { IPv4(198,174,72,0),21 }, + { IPv4(198,174,80,0),20 }, + { IPv4(198,174,120,0),24 }, + { IPv4(198,174,121,0),24 }, + { IPv4(198,174,122,0),23 }, + { IPv4(198,174,124,0),22 }, + { IPv4(198,174,127,0),24 }, + { IPv4(198,174,128,0),22 }, + { IPv4(198,174,128,0),24 }, + { IPv4(198,174,132,0),24 }, + { IPv4(198,174,169,0),24 }, + { IPv4(198,174,176,0),20 }, + { IPv4(198,174,217,0),24 }, + { IPv4(198,174,218,0),23 }, + { IPv4(198,174,220,0),22 }, + { IPv4(198,174,224,0),21 }, + { IPv4(198,174,232,0),24 }, + { IPv4(198,175,9,0),24 }, + { IPv4(198,175,11,0),24 }, + { IPv4(198,175,14,0),24 }, + { IPv4(198,175,47,0),24 }, + { IPv4(198,175,48,0),24 }, + { IPv4(198,175,49,0),24 }, + { IPv4(198,175,56,0),24 }, + { IPv4(198,175,57,0),24 }, + { IPv4(198,175,60,0),24 }, + { IPv4(198,175,62,0),23 }, + { IPv4(198,175,62,0),24 }, + { IPv4(198,175,68,0),24 }, + { IPv4(198,175,70,0),23 }, + { IPv4(198,175,72,0),24 }, + { IPv4(198,175,76,0),24 }, + { IPv4(198,175,149,0),24 }, + { IPv4(198,175,158,0),24 }, + { IPv4(198,175,187,0),24 }, + { IPv4(198,175,194,0),23 }, + { IPv4(198,175,196,0),22 }, + { IPv4(198,175,202,0),24 }, + { IPv4(198,175,203,0),24 }, + { IPv4(198,175,204,0),24 }, + { IPv4(198,175,212,0),22 }, + { IPv4(198,175,236,0),24 }, + { IPv4(198,175,240,0),24 }, + { IPv4(198,175,250,0),24 }, + { IPv4(198,176,16,0),24 }, + { IPv4(198,176,17,0),24 }, + { IPv4(198,176,20,0),24 }, + { IPv4(198,176,21,0),24 }, + { IPv4(198,176,160,0),24 }, + { IPv4(198,176,170,0),24 }, + { IPv4(198,176,174,0),24 }, + { IPv4(198,176,184,0),24 }, + { IPv4(198,176,193,0),24 }, + { IPv4(198,176,199,0),24 }, + { IPv4(198,176,204,0),24 }, + { IPv4(198,176,217,0),24 }, + { IPv4(198,176,225,0),24 }, + { IPv4(198,176,247,0),24 }, + { IPv4(198,177,11,0),24 }, + { IPv4(198,177,12,0),24 }, + { IPv4(198,177,13,0),24 }, + { IPv4(198,177,14,0),24 }, + { IPv4(198,177,15,0),24 }, + { IPv4(198,177,32,0),20 }, + { IPv4(198,177,48,0),22 }, + { IPv4(198,177,169,0),24 }, + { IPv4(198,177,170,0),24 }, + { IPv4(198,177,171,0),24 }, + { IPv4(198,177,172,0),24 }, + { IPv4(198,177,173,0),24 }, + { IPv4(198,177,174,0),24 }, + { IPv4(198,177,180,0),23 }, + { IPv4(198,177,181,0),24 }, + { IPv4(198,177,191,0),24 }, + { IPv4(198,177,192,0),22 }, + { 
IPv4(198,177,196,0),23 }, + { IPv4(198,177,224,0),24 }, + { IPv4(198,177,229,0),24 }, + { IPv4(198,178,8,0),24 }, + { IPv4(198,178,9,0),24 }, + { IPv4(198,178,32,0),20 }, + { IPv4(198,178,32,0),21 }, + { IPv4(198,178,40,0),21 }, + { IPv4(198,178,48,0),22 }, + { IPv4(198,178,48,0),21 }, + { IPv4(198,178,52,0),22 }, + { IPv4(198,178,129,0),24 }, + { IPv4(198,178,148,0),24 }, + { IPv4(198,178,167,0),24 }, + { IPv4(198,178,186,0),24 }, + { IPv4(198,178,215,0),24 }, + { IPv4(198,178,217,0),24 }, + { IPv4(198,178,226,0),24 }, + { IPv4(198,178,232,0),24 }, + { IPv4(198,178,234,0),23 }, + { IPv4(198,178,236,0),22 }, + { IPv4(198,178,254,0),24 }, + { IPv4(198,179,16,0),24 }, + { IPv4(198,179,128,0),24 }, + { IPv4(198,179,140,0),24 }, + { IPv4(198,179,169,0),24 }, + { IPv4(198,179,170,0),24 }, + { IPv4(198,179,171,0),24 }, + { IPv4(198,179,172,0),24 }, + { IPv4(198,179,173,0),24 }, + { IPv4(198,179,201,0),24 }, + { IPv4(198,179,208,0),24 }, + { IPv4(198,179,214,0),24 }, + { IPv4(198,179,232,0),24 }, + { IPv4(198,179,239,0),24 }, + { IPv4(198,179,246,0),24 }, + { IPv4(198,179,248,0),24 }, + { IPv4(198,180,16,0),20 }, + { IPv4(198,180,36,0),24 }, + { IPv4(198,180,49,0),24 }, + { IPv4(198,180,67,0),24 }, + { IPv4(198,180,129,0),24 }, + { IPv4(198,180,136,0),24 }, + { IPv4(198,180,141,0),24 }, + { IPv4(198,180,147,0),24 }, + { IPv4(198,180,161,0),24 }, + { IPv4(198,180,162,0),24 }, + { IPv4(198,180,182,0),24 }, + { IPv4(198,180,183,0),24 }, + { IPv4(198,180,191,0),24 }, + { IPv4(198,180,205,0),24 }, + { IPv4(198,180,215,0),24 }, + { IPv4(198,180,219,0),24 }, + { IPv4(198,180,225,0),24 }, + { IPv4(198,180,252,0),24 }, + { IPv4(198,181,4,0),22 }, + { IPv4(198,181,8,0),24 }, + { IPv4(198,181,17,0),24 }, + { IPv4(198,181,18,0),23 }, + { IPv4(198,181,156,0),24 }, + { IPv4(198,181,161,0),24 }, + { IPv4(198,181,175,0),24 }, + { IPv4(198,181,219,0),24 }, + { IPv4(198,181,242,0),24 }, + { IPv4(198,181,243,0),24 }, + { IPv4(198,181,250,0),24 }, + { IPv4(198,182,8,0),21 }, + { IPv4(198,182,16,0),24 }, + { IPv4(198,182,21,0),24 }, + { IPv4(198,182,24,0),24 }, + { IPv4(198,182,25,0),24 }, + { IPv4(198,182,26,0),24 }, + { IPv4(198,182,28,0),24 }, + { IPv4(198,182,31,0),24 }, + { IPv4(198,182,76,0),24 }, + { IPv4(198,182,88,0),24 }, + { IPv4(198,182,89,0),24 }, + { IPv4(198,182,90,0),24 }, + { IPv4(198,182,91,0),24 }, + { IPv4(198,182,96,0),24 }, + { IPv4(198,182,97,0),24 }, + { IPv4(198,182,98,0),24 }, + { IPv4(198,182,99,0),24 }, + { IPv4(198,182,106,0),24 }, + { IPv4(198,182,107,0),24 }, + { IPv4(198,182,130,0),24 }, + { IPv4(198,182,131,0),24 }, + { IPv4(198,182,132,0),24 }, + { IPv4(198,182,133,0),24 }, + { IPv4(198,182,134,0),24 }, + { IPv4(198,182,140,0),24 }, + { IPv4(198,182,176,0),22 }, + { IPv4(198,182,178,0),24 }, + { IPv4(198,182,180,0),23 }, + { IPv4(198,182,196,0),24 }, + { IPv4(198,182,200,0),24 }, + { IPv4(198,182,201,0),24 }, + { IPv4(198,182,220,0),24 }, + { IPv4(198,182,225,0),24 }, + { IPv4(198,182,239,0),24 }, + { IPv4(198,183,8,0),21 }, + { IPv4(198,183,10,0),24 }, + { IPv4(198,183,128,0),22 }, + { IPv4(198,183,139,0),24 }, + { IPv4(198,183,146,0),23 }, + { IPv4(198,183,157,0),24 }, + { IPv4(198,183,160,0),22 }, + { IPv4(198,183,164,0),24 }, + { IPv4(198,183,165,0),24 }, + { IPv4(198,183,166,0),24 }, + { IPv4(198,183,167,0),24 }, + { IPv4(198,183,217,0),24 }, + { IPv4(198,183,218,0),24 }, + { IPv4(198,183,241,0),24 }, + { IPv4(198,184,66,0),24 }, + { IPv4(198,184,69,0),24 }, + { IPv4(198,184,85,0),24 }, + { IPv4(198,184,93,0),24 }, + { IPv4(198,184,118,0),24 }, + { IPv4(198,184,121,0),24 }, + { 
IPv4(198,184,126,0),24 }, + { IPv4(198,184,127,0),24 }, + { IPv4(198,184,134,0),24 }, + { IPv4(198,184,147,0),24 }, + { IPv4(198,184,150,0),24 }, + { IPv4(198,184,152,0),23 }, + { IPv4(198,184,171,0),24 }, + { IPv4(198,184,210,0),24 }, + { IPv4(198,184,211,0),24 }, + { IPv4(198,184,227,0),24 }, + { IPv4(198,185,4,0),22 }, + { IPv4(198,185,10,0),24 }, + { IPv4(198,185,22,0),24 }, + { IPv4(198,185,70,0),24 }, + { IPv4(198,185,72,0),24 }, + { IPv4(198,185,73,0),24 }, + { IPv4(198,185,104,0),24 }, + { IPv4(198,185,133,0),24 }, + { IPv4(198,185,134,0),23 }, + { IPv4(198,185,136,0),23 }, + { IPv4(198,185,163,0),24 }, + { IPv4(198,185,184,0),24 }, + { IPv4(198,185,205,0),24 }, + { IPv4(198,185,207,0),24 }, + { IPv4(198,185,234,0),24 }, + { IPv4(198,185,236,0),24 }, + { IPv4(198,186,48,0),22 }, + { IPv4(198,186,52,0),24 }, + { IPv4(198,186,53,0),24 }, + { IPv4(198,186,63,0),24 }, + { IPv4(198,186,64,0),24 }, + { IPv4(198,186,145,0),24 }, + { IPv4(198,186,151,0),24 }, + { IPv4(198,186,160,0),24 }, + { IPv4(198,186,167,0),24 }, + { IPv4(198,186,184,0),24 }, + { IPv4(198,186,200,0),22 }, + { IPv4(198,186,212,0),23 }, + { IPv4(198,186,213,0),24 }, + { IPv4(198,186,214,0),24 }, + { IPv4(198,186,237,0),24 }, + { IPv4(198,187,135,0),24 }, + { IPv4(198,187,136,0),24 }, + { IPv4(198,187,156,0),24 }, + { IPv4(198,187,203,0),24 }, + { IPv4(198,187,204,0),24 }, + { IPv4(198,187,215,0),24 }, + { IPv4(198,187,216,0),24 }, + { IPv4(198,187,220,0),24 }, + { IPv4(198,187,247,0),24 }, + { IPv4(198,187,252,0),24 }, + { IPv4(198,188,0,0),16 }, + { IPv4(198,188,7,0),24 }, + { IPv4(198,188,8,0),24 }, + { IPv4(198,188,49,0),24 }, + { IPv4(198,188,50,0),24 }, + { IPv4(198,188,160,0),19 }, + { IPv4(198,188,192,0),20 }, + { IPv4(198,188,208,0),23 }, + { IPv4(198,188,211,0),24 }, + { IPv4(198,188,212,0),22 }, + { IPv4(198,188,216,0),21 }, + { IPv4(198,188,224,0),20 }, + { IPv4(198,188,240,0),21 }, + { IPv4(198,188,252,0),23 }, + { IPv4(198,189,0,0),16 }, + { IPv4(198,190,28,0),24 }, + { IPv4(198,190,147,0),24 }, + { IPv4(198,190,166,0),24 }, + { IPv4(198,190,182,0),24 }, + { IPv4(198,190,187,0),24 }, + { IPv4(198,190,195,0),24 }, + { IPv4(198,190,201,0),24 }, + { IPv4(198,190,216,0),24 }, + { IPv4(198,190,219,0),24 }, + { IPv4(198,190,247,0),24 }, + { IPv4(198,190,249,0),24 }, + { IPv4(198,190,250,0),23 }, + { IPv4(198,190,252,0),24 }, + { IPv4(198,199,9,0),24 }, + { IPv4(198,199,13,0),24 }, + { IPv4(198,199,128,0),24 }, + { IPv4(198,199,136,0),24 }, + { IPv4(198,199,168,0),24 }, + { IPv4(198,199,179,0),24 }, + { IPv4(198,199,187,0),24 }, + { IPv4(198,199,191,0),24 }, + { IPv4(198,199,199,0),24 }, + { IPv4(198,199,206,0),24 }, + { IPv4(198,199,219,0),24 }, + { IPv4(198,199,224,0),24 }, + { IPv4(198,199,237,0),24 }, + { IPv4(198,200,32,0),21 }, + { IPv4(198,200,139,0),24 }, + { IPv4(198,200,147,0),24 }, + { IPv4(198,200,171,0),24 }, + { IPv4(198,200,174,0),24 }, + { IPv4(198,200,182,0),24 }, + { IPv4(198,200,184,0),24 }, + { IPv4(198,200,195,0),24 }, + { IPv4(198,200,228,0),24 }, + { IPv4(198,201,4,0),23 }, + { IPv4(198,201,5,0),24 }, + { IPv4(198,201,6,0),24 }, + { IPv4(198,201,23,0),24 }, + { IPv4(198,202,33,0),24 }, + { IPv4(198,202,64,0),21 }, + { IPv4(198,202,64,0),18 }, + { IPv4(198,202,65,0),24 }, + { IPv4(198,202,66,0),24 }, + { IPv4(198,202,67,0),24 }, + { IPv4(198,202,68,0),24 }, + { IPv4(198,202,69,0),24 }, + { IPv4(198,202,70,0),24 }, + { IPv4(198,202,71,0),24 }, + { IPv4(198,202,72,0),21 }, + { IPv4(198,202,72,0),24 }, + { IPv4(198,202,73,0),24 }, + { IPv4(198,202,74,0),24 }, + { IPv4(198,202,75,0),24 }, + { 
IPv4(198,202,76,0),24 }, + { IPv4(198,202,79,0),24 }, + { IPv4(198,202,80,0),24 }, + { IPv4(198,202,80,0),20 }, + { IPv4(198,202,81,0),24 }, + { IPv4(198,202,84,0),24 }, + { IPv4(198,202,85,0),24 }, + { IPv4(198,202,86,0),24 }, + { IPv4(198,202,87,0),24 }, + { IPv4(198,202,96,0),19 }, + { IPv4(198,202,139,0),24 }, + { IPv4(198,202,144,0),24 }, + { IPv4(198,202,145,0),24 }, + { IPv4(198,202,148,0),24 }, + { IPv4(198,202,150,0),24 }, + { IPv4(198,202,162,0),24 }, + { IPv4(198,202,168,0),24 }, + { IPv4(198,202,174,0),24 }, + { IPv4(198,202,177,0),24 }, + { IPv4(198,202,182,0),24 }, + { IPv4(198,202,200,0),22 }, + { IPv4(198,202,201,0),24 }, + { IPv4(198,202,202,0),24 }, + { IPv4(198,202,204,0),24 }, + { IPv4(198,202,217,0),24 }, + { IPv4(198,202,228,0),23 }, + { IPv4(198,202,235,0),24 }, + { IPv4(198,202,243,0),24 }, + { IPv4(198,203,9,0),24 }, + { IPv4(198,203,11,0),24 }, + { IPv4(198,203,13,0),24 }, + { IPv4(198,203,16,0),21 }, + { IPv4(198,203,24,0),23 }, + { IPv4(198,203,32,0),20 }, + { IPv4(198,203,48,0),20 }, + { IPv4(198,203,145,0),24 }, + { IPv4(198,203,173,0),24 }, + { IPv4(198,203,178,0),24 }, + { IPv4(198,203,191,0),24 }, + { IPv4(198,203,192,0),24 }, + { IPv4(198,203,219,0),24 }, + { IPv4(198,203,246,0),24 }, + { IPv4(198,204,22,0),24 }, + { IPv4(198,204,92,0),24 }, + { IPv4(198,204,104,0),24 }, + { IPv4(198,204,116,0),22 }, + { IPv4(198,204,120,0),23 }, + { IPv4(198,204,122,0),24 }, + { IPv4(198,204,133,0),24 }, + { IPv4(198,204,134,0),24 }, + { IPv4(198,204,138,0),24 }, + { IPv4(198,204,141,0),24 }, + { IPv4(198,204,142,0),24 }, + { IPv4(198,205,14,0),24 }, + { IPv4(198,206,16,0),20 }, + { IPv4(198,206,47,0),24 }, + { IPv4(198,206,131,0),24 }, + { IPv4(198,206,134,0),24 }, + { IPv4(198,206,162,0),24 }, + { IPv4(198,206,175,0),24 }, + { IPv4(198,206,193,0),24 }, + { IPv4(198,206,194,0),24 }, + { IPv4(198,206,222,0),24 }, + { IPv4(198,206,223,0),24 }, + { IPv4(198,206,234,0),23 }, + { IPv4(198,206,236,0),24 }, + { IPv4(198,206,239,0),24 }, + { IPv4(198,206,240,0),23 }, + { IPv4(198,206,242,0),24 }, + { IPv4(198,206,243,0),24 }, + { IPv4(198,206,246,0),24 }, + { IPv4(198,206,247,0),24 }, + { IPv4(198,207,8,0),21 }, + { IPv4(198,207,153,0),24 }, + { IPv4(198,207,168,0),24 }, + { IPv4(198,207,169,0),24 }, + { IPv4(198,207,176,0),24 }, + { IPv4(198,207,179,0),24 }, + { IPv4(198,207,185,0),24 }, + { IPv4(198,207,193,0),24 }, + { IPv4(198,207,196,0),24 }, + { IPv4(198,207,229,0),24 }, + { IPv4(198,207,230,0),23 }, + { IPv4(198,207,232,0),24 }, + { IPv4(198,207,237,0),24 }, + { IPv4(198,207,238,0),24 }, + { IPv4(198,207,239,0),24 }, + { IPv4(198,207,240,0),24 }, + { IPv4(198,207,241,0),24 }, + { IPv4(198,208,6,0),24 }, + { IPv4(198,208,23,0),24 }, + { IPv4(198,208,28,0),24 }, + { IPv4(198,208,223,0),24 }, + { IPv4(198,209,0,0),19 }, + { IPv4(198,209,32,0),19 }, + { IPv4(198,209,64,0),19 }, + { IPv4(198,209,96,0),19 }, + { IPv4(198,209,128,0),19 }, + { IPv4(198,209,160,0),19 }, + { IPv4(198,209,192,0),19 }, + { IPv4(198,209,224,0),19 }, + { IPv4(198,211,0,0),16 }, + { IPv4(198,211,40,0),24 }, + { IPv4(198,211,54,0),24 }, + { IPv4(198,211,56,0),23 }, + { IPv4(198,211,65,0),24 }, + { IPv4(198,211,124,0),24 }, + { IPv4(198,212,166,0),24 }, + { IPv4(198,212,176,0),24 }, + { IPv4(198,212,187,0),24 }, + { IPv4(198,212,194,0),23 }, + { IPv4(198,212,196,0),23 }, + { IPv4(198,212,199,0),24 }, + { IPv4(198,212,205,0),24 }, + { IPv4(198,212,206,0),24 }, + { IPv4(198,212,207,0),24 }, + { IPv4(198,212,218,0),24 }, + { IPv4(198,212,246,0),24 }, + { IPv4(198,212,251,0),24 }, + { IPv4(198,217,216,0),21 
}, + { IPv4(198,217,224,0),24 }, + { IPv4(198,218,0,0),16 }, + { IPv4(198,218,204,0),24 }, + { IPv4(198,220,0,0),16 }, + { IPv4(198,222,0,0),16 }, + { IPv4(198,223,29,0),24 }, + { IPv4(198,223,30,0),23 }, + { IPv4(198,223,97,0),24 }, + { IPv4(198,223,101,0),24 }, + { IPv4(198,223,102,0),24 }, + { IPv4(198,223,106,0),24 }, + { IPv4(198,223,128,0),21 }, + { IPv4(198,223,160,0),19 }, + { IPv4(198,228,0,0),19 }, + { IPv4(198,228,192,0),18 }, + { IPv4(198,232,32,0),19 }, + { IPv4(198,232,129,0),24 }, + { IPv4(198,232,136,0),24 }, + { IPv4(198,232,144,0),24 }, + { IPv4(198,232,168,0),23 }, + { IPv4(198,232,211,0),24 }, + { IPv4(198,232,214,0),24 }, + { IPv4(198,232,215,0),24 }, + { IPv4(198,232,216,0),24 }, + { IPv4(198,232,217,0),24 }, + { IPv4(198,232,236,0),24 }, + { IPv4(198,232,237,0),24 }, + { IPv4(198,232,238,0),24 }, + { IPv4(198,232,239,0),24 }, + { IPv4(198,233,0,0),16 }, + { IPv4(198,233,27,0),24 }, + { IPv4(198,234,0,0),16 }, + { IPv4(198,235,23,0),24 }, + { IPv4(198,235,56,0),24 }, + { IPv4(198,235,157,0),24 }, + { IPv4(198,235,177,0),24 }, + { IPv4(198,235,180,0),22 }, + { IPv4(198,235,202,0),24 }, + { IPv4(198,235,203,0),24 }, + { IPv4(198,235,204,0),24 }, + { IPv4(198,235,205,0),24 }, + { IPv4(198,236,0,0),21 }, + { IPv4(198,236,8,0),23 }, + { IPv4(198,236,21,0),24 }, + { IPv4(198,236,22,0),23 }, + { IPv4(198,236,24,0),21 }, + { IPv4(198,236,32,0),20 }, + { IPv4(198,236,48,0),21 }, + { IPv4(198,236,56,0),24 }, + { IPv4(198,236,128,0),19 }, + { IPv4(198,237,208,0),20 }, + { IPv4(198,237,224,0),19 }, + { IPv4(198,240,129,0),24 }, + { IPv4(198,240,130,0),24 }, + { IPv4(198,241,201,0),24 }, + { IPv4(198,241,202,0),24 }, + { IPv4(198,241,202,0),23 }, + { IPv4(198,241,203,0),24 }, + { IPv4(198,241,204,0),24 }, + { IPv4(198,242,23,0),24 }, + { IPv4(198,242,56,0),24 }, + { IPv4(198,242,57,0),24 }, + { IPv4(198,242,58,0),24 }, + { IPv4(198,242,109,0),24 }, + { IPv4(198,242,111,0),24 }, + { IPv4(198,242,208,0),22 }, + { IPv4(198,242,212,0),24 }, + { IPv4(198,242,213,0),24 }, + { IPv4(198,242,214,0),23 }, + { IPv4(198,242,216,0),24 }, + { IPv4(198,243,0,0),16 }, + { IPv4(198,243,69,0),24 }, + { IPv4(198,243,127,0),24 }, + { IPv4(198,243,128,0),17 }, + { IPv4(198,243,153,0),24 }, + { IPv4(198,243,180,0),24 }, + { IPv4(198,245,32,0),21 }, + { IPv4(198,245,40,0),23 }, + { IPv4(198,245,140,0),24 }, + { IPv4(198,245,183,0),24 }, + { IPv4(198,245,204,0),24 }, + { IPv4(198,245,206,0),24 }, + { IPv4(198,245,210,0),24 }, + { IPv4(198,245,211,0),24 }, + { IPv4(198,245,214,0),24 }, + { IPv4(198,246,0,0),21 }, + { IPv4(198,246,16,0),21 }, + { IPv4(198,246,24,0),23 }, + { IPv4(198,246,32,0),21 }, + { IPv4(198,246,132,0),23 }, + { IPv4(198,246,192,0),24 }, + { IPv4(198,246,200,0),24 }, + { IPv4(198,246,227,0),24 }, + { IPv4(198,246,233,0),24 }, + { IPv4(198,246,237,0),24 }, + { IPv4(198,246,246,0),24 }, + { IPv4(198,246,254,0),24 }, + { IPv4(198,247,0,0),16 }, + { IPv4(198,247,48,0),20 }, + { IPv4(198,247,96,0),19 }, + { IPv4(198,247,128,0),19 }, + { IPv4(198,247,184,0),22 }, + { IPv4(198,247,232,0),23 }, + { IPv4(198,247,234,0),24 }, + { IPv4(198,247,236,0),22 }, + { IPv4(198,248,64,0),19 }, + { IPv4(198,248,96,0),19 }, + { IPv4(198,249,61,0),24 }, + { IPv4(198,250,64,0),19 }, + { IPv4(198,250,128,0),18 }, + { IPv4(198,250,180,0),24 }, + { IPv4(198,250,192,0),19 }, + { IPv4(198,250,202,0),24 }, + { IPv4(198,250,203,0),24 }, + { IPv4(198,250,204,0),24 }, + { IPv4(198,250,224,0),20 }, + { IPv4(198,250,240,0),21 }, + { IPv4(198,250,248,0),22 }, + { IPv4(198,252,8,0),21 }, + { IPv4(198,252,32,0),19 }, + { 
IPv4(198,252,143,0),24 }, + { IPv4(198,252,175,0),24 }, + { IPv4(198,252,182,0),24 }, + { IPv4(198,252,186,0),24 }, + { IPv4(198,252,189,0),24 }, + { IPv4(198,252,190,0),24 }, + { IPv4(198,252,191,0),24 }, + { IPv4(198,252,192,0),24 }, + { IPv4(198,252,208,0),23 }, + { IPv4(198,252,214,0),24 }, + { IPv4(198,252,232,0),24 }, + { IPv4(198,252,237,0),24 }, + { IPv4(198,252,240,0),23 }, + { IPv4(198,252,244,0),24 }, + { IPv4(198,253,0,0),16 }, + { IPv4(198,253,0,0),20 }, + { IPv4(198,253,16,0),20 }, + { IPv4(198,253,40,0),21 }, + { IPv4(198,253,58,0),24 }, + { IPv4(198,253,60,0),24 }, + { IPv4(198,253,71,0),24 }, + { IPv4(198,253,72,0),21 }, + { IPv4(198,253,80,0),20 }, + { IPv4(198,253,99,0),24 }, + { IPv4(198,253,100,0),22 }, + { IPv4(198,253,104,0),22 }, + { IPv4(198,253,110,0),24 }, + { IPv4(198,253,111,0),24 }, + { IPv4(198,253,112,0),23 }, + { IPv4(198,253,114,0),24 }, + { IPv4(198,253,116,0),24 }, + { IPv4(198,253,117,0),24 }, + { IPv4(198,253,118,0),23 }, + { IPv4(198,253,120,0),24 }, + { IPv4(198,253,122,0),23 }, + { IPv4(198,253,124,0),22 }, + { IPv4(198,253,128,0),21 }, + { IPv4(198,253,136,0),21 }, + { IPv4(198,253,147,0),24 }, + { IPv4(198,253,148,0),23 }, + { IPv4(198,253,158,0),24 }, + { IPv4(198,253,161,0),24 }, + { IPv4(198,253,163,0),24 }, + { IPv4(198,253,166,0),23 }, + { IPv4(198,253,168,0),22 }, + { IPv4(198,253,173,0),24 }, + { IPv4(198,253,174,0),24 }, + { IPv4(198,253,175,0),24 }, + { IPv4(198,253,177,0),24 }, + { IPv4(198,253,178,0),24 }, + { IPv4(198,253,184,0),24 }, + { IPv4(198,253,185,0),24 }, + { IPv4(198,253,186,0),24 }, + { IPv4(198,253,187,0),24 }, + { IPv4(198,253,188,0),22 }, + { IPv4(198,253,192,0),22 }, + { IPv4(198,253,196,0),24 }, + { IPv4(198,253,198,0),24 }, + { IPv4(198,253,199,0),24 }, + { IPv4(198,253,200,0),24 }, + { IPv4(198,253,204,0),23 }, + { IPv4(198,253,206,0),24 }, + { IPv4(198,253,213,0),24 }, + { IPv4(198,253,225,0),24 }, + { IPv4(198,253,226,0),24 }, + { IPv4(198,253,232,0),23 }, + { IPv4(198,253,243,0),24 }, + { IPv4(198,253,246,0),23 }, + { IPv4(198,253,253,0),24 }, + { IPv4(198,253,254,0),24 }, + { IPv4(198,253,255,0),24 }, + { IPv4(198,254,0,0),20 }, + { IPv4(199,0,8,0),24 }, + { IPv4(199,1,1,0),24 }, + { IPv4(199,1,91,0),24 }, + { IPv4(199,1,156,0),24 }, + { IPv4(199,1,157,0),24 }, + { IPv4(199,1,204,0),22 }, + { IPv4(199,2,8,0),21 }, + { IPv4(199,2,16,0),20 }, + { IPv4(199,2,50,0),24 }, + { IPv4(199,2,64,0),19 }, + { IPv4(199,2,135,0),24 }, + { IPv4(199,2,137,0),24 }, + { IPv4(199,2,139,0),24 }, + { IPv4(199,3,10,0),23 }, + { IPv4(199,3,12,0),24 }, + { IPv4(199,3,109,0),24 }, + { IPv4(199,3,182,0),24 }, + { IPv4(199,3,240,0),24 }, + { IPv4(199,4,48,0),22 }, + { IPv4(199,4,56,0),22 }, + { IPv4(199,4,57,0),24 }, + { IPv4(199,4,58,0),24 }, + { IPv4(199,4,64,0),18 }, + { IPv4(199,4,140,0),22 }, + { IPv4(199,4,146,0),23 }, + { IPv4(199,4,151,0),24 }, + { IPv4(199,4,154,0),24 }, + { IPv4(199,4,164,0),22 }, + { IPv4(199,4,187,0),24 }, + { IPv4(199,4,191,0),24 }, + { IPv4(199,4,194,0),24 }, + { IPv4(199,4,220,0),24 }, + { IPv4(199,4,225,0),24 }, + { IPv4(199,4,235,0),24 }, + { IPv4(199,4,246,0),23 }, + { IPv4(199,4,249,0),24 }, + { IPv4(199,4,250,0),23 }, + { IPv4(199,4,252,0),24 }, + { IPv4(199,4,253,0),24 }, + { IPv4(199,5,8,0),21 }, + { IPv4(199,5,16,0),24 }, + { IPv4(199,5,60,0),24 }, + { IPv4(199,5,61,0),24 }, + { IPv4(199,5,133,0),24 }, + { IPv4(199,5,163,0),24 }, + { IPv4(199,5,174,0),24 }, + { IPv4(199,5,176,0),23 }, + { IPv4(199,5,178,0),24 }, + { IPv4(199,5,179,0),24 }, + { IPv4(199,5,180,0),24 }, + { IPv4(199,5,181,0),24 }, + { 
IPv4(199,5,182,0),24 }, + { IPv4(199,5,202,0),24 }, + { IPv4(199,5,204,0),22 }, + { IPv4(199,5,208,0),22 }, + { IPv4(199,5,225,0),24 }, + { IPv4(199,5,231,0),24 }, + { IPv4(199,5,232,0),24 }, + { IPv4(199,5,243,0),24 }, + { IPv4(199,5,254,0),24 }, + { IPv4(199,6,98,0),24 }, + { IPv4(199,6,127,0),24 }, + { IPv4(199,9,0,0),24 }, + { IPv4(199,9,1,0),24 }, + { IPv4(199,9,2,0),24 }, + { IPv4(199,9,10,0),23 }, + { IPv4(199,9,16,0),22 }, + { IPv4(199,9,64,0),18 }, + { IPv4(199,10,0,0),16 }, + { IPv4(199,10,16,0),21 }, + { IPv4(199,10,50,0),24 }, + { IPv4(199,10,62,0),24 }, + { IPv4(199,10,67,0),24 }, + { IPv4(199,10,77,0),24 }, + { IPv4(199,10,78,0),23 }, + { IPv4(199,10,80,0),24 }, + { IPv4(199,10,81,0),24 }, + { IPv4(199,10,93,0),24 }, + { IPv4(199,10,119,0),24 }, + { IPv4(199,10,127,0),24 }, + { IPv4(199,10,133,0),24 }, + { IPv4(199,10,135,0),24 }, + { IPv4(199,10,138,0),24 }, + { IPv4(199,10,139,0),24 }, + { IPv4(199,10,141,0),24 }, + { IPv4(199,10,142,0),24 }, + { IPv4(199,10,148,0),24 }, + { IPv4(199,10,200,0),24 }, + { IPv4(199,10,215,0),24 }, + { IPv4(199,10,231,0),24 }, + { IPv4(199,10,233,0),24 }, + { IPv4(199,15,0,0),21 }, + { IPv4(199,15,60,0),22 }, + { IPv4(199,16,32,0),19 }, + { IPv4(199,17,0,0),16 }, + { IPv4(199,19,8,0),24 }, + { IPv4(199,19,9,0),24 }, + { IPv4(199,20,8,0),21 }, + { IPv4(199,20,51,0),24 }, + { IPv4(199,20,56,0),24 }, + { IPv4(199,20,59,0),24 }, + { IPv4(199,20,64,0),18 }, + { IPv4(199,21,28,0),22 }, + { IPv4(199,22,0,0),16 }, + { IPv4(199,26,8,0),21 }, + { IPv4(199,26,153,0),24 }, + { IPv4(199,26,155,0),24 }, + { IPv4(199,26,165,0),24 }, + { IPv4(199,26,171,0),24 }, + { IPv4(199,26,199,0),24 }, + { IPv4(199,26,202,0),24 }, + { IPv4(199,26,225,0),24 }, + { IPv4(199,29,1,0),24 }, + { IPv4(199,29,2,0),24 }, + { IPv4(199,29,3,0),24 }, + { IPv4(199,29,6,0),24 }, + { IPv4(199,29,7,0),24 }, + { IPv4(199,29,8,0),24 }, + { IPv4(199,29,9,0),24 }, + { IPv4(199,29,31,0),24 }, + { IPv4(199,29,68,0),24 }, + { IPv4(199,29,92,0),22 }, + { IPv4(199,29,132,0),24 }, + { IPv4(199,29,141,0),24 }, + { IPv4(199,29,144,0),20 }, + { IPv4(199,29,184,0),24 }, + { IPv4(199,29,196,0),23 }, + { IPv4(199,29,201,0),24 }, + { IPv4(199,29,202,0),24 }, + { IPv4(199,29,203,0),24 }, + { IPv4(199,29,204,0),24 }, + { IPv4(199,29,205,0),24 }, + { IPv4(199,29,206,0),23 }, + { IPv4(199,29,208,0),23 }, + { IPv4(199,29,210,0),24 }, + { IPv4(199,29,211,0),24 }, + { IPv4(199,29,212,0),24 }, + { IPv4(199,29,213,0),24 }, + { IPv4(199,29,214,0),24 }, + { IPv4(199,29,215,0),24 }, + { IPv4(199,29,216,0),22 }, + { IPv4(199,29,220,0),22 }, + { IPv4(199,29,242,0),24 }, + { IPv4(199,29,245,0),24 }, + { IPv4(199,29,246,0),24 }, + { IPv4(199,29,247,0),24 }, + { IPv4(199,29,255,0),24 }, + { IPv4(199,30,4,0),24 }, + { IPv4(199,30,32,0),24 }, + { IPv4(199,31,2,0),24 }, + { IPv4(199,31,3,0),24 }, + { IPv4(199,31,8,0),22 }, + { IPv4(199,31,12,0),24 }, + { IPv4(199,31,21,0),24 }, + { IPv4(199,31,31,0),24 }, + { IPv4(199,31,107,0),24 }, + { IPv4(199,32,128,0),18 }, + { IPv4(199,32,192,0),19 }, + { IPv4(199,33,32,0),19 }, + { IPv4(199,33,64,0),24 }, + { IPv4(199,33,79,0),24 }, + { IPv4(199,33,81,0),24 }, + { IPv4(199,33,119,0),24 }, + { IPv4(199,33,128,0),24 }, + { IPv4(199,33,129,0),24 }, + { IPv4(199,33,144,0),24 }, + { IPv4(199,33,159,0),24 }, + { IPv4(199,33,164,0),23 }, + { IPv4(199,33,164,0),24 }, + { IPv4(199,33,165,0),24 }, + { IPv4(199,33,166,0),24 }, + { IPv4(199,33,167,0),24 }, + { IPv4(199,33,168,0),24 }, + { IPv4(199,33,169,0),24 }, + { IPv4(199,33,170,0),24 }, + { IPv4(199,33,171,0),24 }, + { IPv4(199,33,172,0),24 
}, + { IPv4(199,33,173,0),24 }, + { IPv4(199,33,182,0),24 }, + { IPv4(199,33,203,0),24 }, + { IPv4(199,33,206,0),24 }, + { IPv4(199,33,217,0),24 }, + { IPv4(199,33,223,0),24 }, + { IPv4(199,33,224,0),23 }, + { IPv4(199,33,238,0),24 }, + { IPv4(199,33,244,0),24 }, + { IPv4(199,33,252,0),24 }, + { IPv4(199,34,16,0),20 }, + { IPv4(199,34,32,0),24 }, + { IPv4(199,34,53,0),24 }, + { IPv4(199,34,138,0),23 }, + { IPv4(199,34,167,0),24 }, + { IPv4(199,34,173,0),24 }, + { IPv4(199,34,183,0),24 }, + { IPv4(199,34,216,0),23 }, + { IPv4(199,35,0,0),16 }, + { IPv4(199,36,19,0),24 }, + { IPv4(199,36,24,0),24 }, + { IPv4(199,36,25,0),24 }, + { IPv4(199,37,0,0),17 }, + { IPv4(199,37,96,0),24 }, + { IPv4(199,37,112,0),24 }, + { IPv4(199,37,116,0),24 }, + { IPv4(199,37,128,0),24 }, + { IPv4(199,37,129,0),24 }, + { IPv4(199,37,130,0),24 }, + { IPv4(199,37,131,0),24 }, + { IPv4(199,37,132,0),24 }, + { IPv4(199,37,133,0),24 }, + { IPv4(199,37,138,0),24 }, + { IPv4(199,37,148,0),24 }, + { IPv4(199,37,158,0),24 }, + { IPv4(199,37,159,0),24 }, + { IPv4(199,37,160,0),24 }, + { IPv4(199,37,161,0),24 }, + { IPv4(199,37,162,0),24 }, + { IPv4(199,37,163,0),24 }, + { IPv4(199,37,164,0),24 }, + { IPv4(199,37,165,0),24 }, + { IPv4(199,37,170,0),24 }, + { IPv4(199,37,173,0),24 }, + { IPv4(199,37,180,0),24 }, + { IPv4(199,37,181,0),24 }, + { IPv4(199,37,192,0),18 }, + { IPv4(199,37,204,0),24 }, + { IPv4(199,37,205,0),24 }, + { IPv4(199,37,213,0),24 }, + { IPv4(199,37,214,0),24 }, + { IPv4(199,37,219,0),24 }, + { IPv4(199,38,0,0),24 }, + { IPv4(199,38,1,0),24 }, + { IPv4(199,38,2,0),24 }, + { IPv4(199,38,3,0),24 }, + { IPv4(199,38,4,0),24 }, + { IPv4(199,38,5,0),24 }, + { IPv4(199,38,6,0),24 }, + { IPv4(199,38,7,0),24 }, + { IPv4(199,38,32,0),20 }, + { IPv4(199,38,48,0),22 }, + { IPv4(199,38,133,0),24 }, + { IPv4(199,41,3,0),24 }, + { IPv4(199,41,8,0),23 }, + { IPv4(199,41,127,0),24 }, + { IPv4(199,41,196,0),24 }, + { IPv4(199,41,197,0),24 }, + { IPv4(199,41,198,0),23 }, + { IPv4(199,41,200,0),22 }, + { IPv4(199,41,248,0),24 }, + { IPv4(199,41,254,0),24 }, + { IPv4(199,42,104,0),23 }, + { IPv4(199,42,240,0),23 }, + { IPv4(199,43,32,0),24 }, + { IPv4(199,43,33,0),24 }, + { IPv4(199,43,34,0),24 }, + { IPv4(199,43,35,0),24 }, + { IPv4(199,43,48,0),24 }, + { IPv4(199,43,49,0),24 }, + { IPv4(199,43,51,0),24 }, + { IPv4(199,43,117,0),24 }, + { IPv4(199,43,172,0),24 }, + { IPv4(199,45,66,0),23 }, + { IPv4(199,45,68,0),24 }, + { IPv4(199,45,70,0),24 }, + { IPv4(199,45,84,0),24 }, + { IPv4(199,45,88,0),24 }, + { IPv4(199,45,123,0),24 }, + { IPv4(199,45,128,0),17 }, + { IPv4(199,45,150,0),24 }, + { IPv4(199,46,8,0),21 }, + { IPv4(199,46,16,0),20 }, + { IPv4(199,46,16,0),23 }, + { IPv4(199,46,18,0),23 }, + { IPv4(199,46,128,0),17 }, + { IPv4(199,46,200,0),24 }, + { IPv4(199,46,245,0),24 }, + { IPv4(199,46,255,0),24 }, + { IPv4(199,48,4,0),22 }, + { IPv4(199,48,22,0),24 }, + { IPv4(199,48,23,0),24 }, + { IPv4(199,48,24,0),24 }, + { IPv4(199,49,3,0),24 }, + { IPv4(199,49,22,0),24 }, + { IPv4(199,49,39,0),24 }, + { IPv4(199,49,70,0),24 }, + { IPv4(199,50,26,0),24 }, + { IPv4(199,50,29,0),24 }, + { IPv4(199,51,77,0),24 }, + { IPv4(199,51,99,0),24 }, + { IPv4(199,51,112,0),24 }, + { IPv4(199,53,16,0),24 }, + { IPv4(199,53,17,0),24 }, + { IPv4(199,53,19,0),24 }, + { IPv4(199,53,20,0),24 }, + { IPv4(199,53,22,0),24 }, + { IPv4(199,53,23,0),24 }, + { IPv4(199,53,74,0),24 }, + { IPv4(199,53,77,0),24 }, + { IPv4(199,53,78,0),24 }, + { IPv4(199,53,98,0),24 }, + { IPv4(199,53,100,0),24 }, + { IPv4(199,53,102,0),23 }, + { IPv4(199,53,183,0),24 }, 
+ { IPv4(199,53,184,0),24 }, + { IPv4(199,57,0,0),16 }, + { IPv4(199,58,52,0),24 }, + { IPv4(199,59,40,0),24 }, + { IPv4(199,60,103,0),24 }, + { IPv4(199,60,237,0),24 }, + { IPv4(199,64,0,0),24 }, + { IPv4(199,64,1,0),24 }, + { IPv4(199,64,7,0),24 }, + { IPv4(199,64,8,0),24 }, + { IPv4(199,65,196,0),24 }, + { IPv4(199,66,1,0),24 }, + { IPv4(199,66,10,0),24 }, + { IPv4(199,67,0,0),24 }, + { IPv4(199,67,7,0),24 }, + { IPv4(199,67,16,0),20 }, + { IPv4(199,67,190,0),24 }, + { IPv4(199,68,35,0),24 }, + { IPv4(199,68,81,0),24 }, + { IPv4(199,69,0,0),16 }, + { IPv4(199,69,32,0),24 }, + { IPv4(199,70,5,0),24 }, + { IPv4(199,70,128,0),24 }, + { IPv4(199,70,144,0),24 }, + { IPv4(199,70,148,0),24 }, + { IPv4(199,71,27,0),24 }, + { IPv4(199,71,40,0),24 }, + { IPv4(199,71,52,0),24 }, + { IPv4(199,71,68,0),24 }, + { IPv4(199,71,115,0),24 }, + { IPv4(199,71,175,0),24 }, + { IPv4(199,71,182,0),23 }, + { IPv4(199,71,187,0),24 }, + { IPv4(199,71,188,0),24 }, + { IPv4(199,73,20,0),24 }, + { IPv4(199,73,32,0),21 }, + { IPv4(199,73,40,0),23 }, + { IPv4(199,74,8,0),21 }, + { IPv4(199,74,141,0),24 }, + { IPv4(199,74,142,0),24 }, + { IPv4(199,74,198,0),24 }, + { IPv4(199,74,206,0),24 }, + { IPv4(199,74,211,0),24 }, + { IPv4(199,74,216,0),22 }, + { IPv4(199,74,220,0),23 }, + { IPv4(199,74,242,0),24 }, + { IPv4(199,76,144,0),20 }, + { IPv4(199,76,160,0),19 }, + { IPv4(199,76,192,0),24 }, + { IPv4(199,76,198,0),24 }, + { IPv4(199,77,44,0),23 }, + { IPv4(199,77,46,0),24 }, + { IPv4(199,77,128,0),17 }, + { IPv4(199,78,60,0),23 }, + { IPv4(199,79,64,0),19 }, + { IPv4(199,79,131,0),24 }, + { IPv4(199,79,136,0),24 }, + { IPv4(199,79,142,0),24 }, + { IPv4(199,79,143,0),24 }, + { IPv4(199,79,144,0),24 }, + { IPv4(199,79,155,0),24 }, + { IPv4(199,79,168,0),22 }, + { IPv4(199,79,186,0),24 }, + { IPv4(199,79,200,0),24 }, + { IPv4(199,79,202,0),24 }, + { IPv4(199,79,215,0),24 }, + { IPv4(199,79,228,0),24 }, + { IPv4(199,79,236,0),24 }, + { IPv4(199,79,250,0),23 }, + { IPv4(199,79,252,0),23 }, + { IPv4(199,79,254,0),24 }, + { IPv4(199,80,50,0),24 }, + { IPv4(199,80,128,0),17 }, + { IPv4(199,81,0,0),16 }, + { IPv4(199,81,192,0),19 }, + { IPv4(199,82,0,0),16 }, + { IPv4(199,83,64,0),20 }, + { IPv4(199,84,1,0),24 }, + { IPv4(199,84,52,0),24 }, + { IPv4(199,84,53,0),24 }, + { IPv4(199,84,54,0),24 }, + { IPv4(199,84,135,0),24 }, + { IPv4(199,84,152,0),24 }, + { IPv4(199,84,172,0),24 }, + { IPv4(199,84,174,0),23 }, + { IPv4(199,85,7,0),24 }, + { IPv4(199,85,9,0),24 }, + { IPv4(199,85,19,0),24 }, + { IPv4(199,85,25,0),24 }, + { IPv4(199,85,107,0),24 }, + { IPv4(199,85,245,0),24 }, + { IPv4(199,86,0,0),16 }, + { IPv4(199,86,16,0),22 }, + { IPv4(199,86,27,0),24 }, + { IPv4(199,86,68,0),22 }, + { IPv4(199,86,128,0),17 }, + { IPv4(199,87,16,0),21 }, + { IPv4(199,87,24,0),23 }, + { IPv4(199,88,14,0),24 }, + { IPv4(199,88,104,0),23 }, + { IPv4(199,88,132,0),24 }, + { IPv4(199,88,134,0),23 }, + { IPv4(199,88,136,0),23 }, + { IPv4(199,88,145,0),24 }, + { IPv4(199,88,147,0),24 }, + { IPv4(199,88,158,0),24 }, + { IPv4(199,88,171,0),24 }, + { IPv4(199,88,179,0),24 }, + { IPv4(199,88,187,0),24 }, + { IPv4(199,88,205,0),24 }, + { IPv4(199,88,232,0),24 }, + { IPv4(199,88,234,0),24 }, + { IPv4(199,88,235,0),24 }, + { IPv4(199,88,249,0),24 }, + { IPv4(199,89,0,0),21 }, + { IPv4(199,89,8,0),21 }, + { IPv4(199,89,64,0),18 }, + { IPv4(199,89,128,0),24 }, + { IPv4(199,89,140,0),24 }, + { IPv4(199,89,163,0),24 }, + { IPv4(199,89,187,0),24 }, + { IPv4(199,89,192,0),23 }, + { IPv4(199,89,214,0),24 }, + { IPv4(199,89,224,0),24 }, + { IPv4(199,89,233,0),24 }, 
+ { IPv4(199,89,234,0),24 }, + { IPv4(199,89,248,0),24 }, + { IPv4(199,89,253,0),24 }, + { IPv4(199,91,32,0),24 }, + { IPv4(199,91,33,0),24 }, + { IPv4(199,91,34,0),24 }, + { IPv4(199,91,35,0),24 }, + { IPv4(199,91,36,0),24 }, + { IPv4(199,91,37,0),24 }, + { IPv4(199,91,38,0),24 }, + { IPv4(199,91,39,0),24 }, + { IPv4(199,93,70,0),24 }, + { IPv4(199,93,71,0),24 }, + { IPv4(199,96,19,0),24 }, + { IPv4(199,96,32,0),22 }, + { IPv4(199,96,40,0),22 }, + { IPv4(199,96,44,0),23 }, + { IPv4(199,96,46,0),24 }, + { IPv4(199,97,12,0),24 }, + { IPv4(199,97,48,0),24 }, + { IPv4(199,97,98,0),24 }, + { IPv4(199,97,121,0),24 }, + { IPv4(199,97,192,0),22 }, + { IPv4(199,97,212,0),24 }, + { IPv4(199,98,7,0),24 }, + { IPv4(199,98,59,0),24 }, + { IPv4(199,98,84,0),24 }, + { IPv4(199,98,88,0),24 }, + { IPv4(199,98,112,0),23 }, + { IPv4(199,98,171,0),24 }, + { IPv4(199,98,200,0),24 }, + { IPv4(199,98,205,0),24 }, + { IPv4(199,99,2,0),24 }, + { IPv4(199,99,64,0),21 }, + { IPv4(199,99,72,0),23 }, + { IPv4(199,99,102,0),24 }, + { IPv4(199,99,156,0),22 }, + { IPv4(199,99,242,0),24 }, + { IPv4(199,99,243,0),24 }, + { IPv4(199,99,244,0),24 }, + { IPv4(199,99,248,0),21 }, + { IPv4(199,101,4,0),24 }, + { IPv4(199,101,6,0),24 }, + { IPv4(199,101,8,0),21 }, + { IPv4(199,102,9,0),24 }, + { IPv4(199,102,15,0),24 }, + { IPv4(199,102,39,0),24 }, + { IPv4(199,103,128,0),17 }, + { IPv4(199,104,0,0),18 }, + { IPv4(199,104,18,0),24 }, + { IPv4(199,104,19,0),24 }, + { IPv4(199,104,20,0),24 }, + { IPv4(199,104,22,0),24 }, + { IPv4(199,104,23,0),24 }, + { IPv4(199,104,32,0),19 }, + { IPv4(199,104,64,0),18 }, + { IPv4(199,104,76,0),24 }, + { IPv4(199,104,78,0),24 }, + { IPv4(199,104,79,0),24 }, + { IPv4(199,104,80,0),24 }, + { IPv4(199,104,81,0),24 }, + { IPv4(199,104,82,0),24 }, + { IPv4(199,104,83,0),24 }, + { IPv4(199,104,84,0),24 }, + { IPv4(199,104,107,0),24 }, + { IPv4(199,104,108,0),24 }, + { IPv4(199,104,112,0),24 }, + { IPv4(199,104,113,0),24 }, + { IPv4(199,104,114,0),24 }, + { IPv4(199,104,115,0),24 }, + { IPv4(199,104,116,0),24 }, + { IPv4(199,104,117,0),24 }, + { IPv4(199,104,118,0),24 }, + { IPv4(199,104,119,0),24 }, + { IPv4(199,104,120,0),21 }, + { IPv4(199,104,128,0),19 }, + { IPv4(199,104,128,0),24 }, + { IPv4(199,104,128,0),17 }, + { IPv4(199,104,132,0),24 }, + { IPv4(199,104,138,0),23 }, + { IPv4(199,104,140,0),22 }, + { IPv4(199,104,144,0),20 }, + { IPv4(199,104,146,0),24 }, + { IPv4(199,104,148,0),24 }, + { IPv4(199,104,149,0),24 }, + { IPv4(199,104,151,0),24 }, + { IPv4(199,104,192,0),19 }, + { IPv4(199,104,192,0),18 }, + { IPv4(199,104,231,0),24 }, + { IPv4(199,104,232,0),21 }, + { IPv4(199,104,244,0),23 }, + { IPv4(199,104,248,0),21 }, + { IPv4(199,104,248,0),22 }, + { IPv4(199,104,252,0),24 }, + { IPv4(199,105,0,0),18 }, + { IPv4(199,105,0,0),16 }, + { IPv4(199,105,64,0),23 }, + { IPv4(199,105,84,0),22 }, + { IPv4(199,105,112,0),21 }, + { IPv4(199,105,120,0),21 }, + { IPv4(199,105,138,0),24 }, + { IPv4(199,105,175,0),24 }, + { IPv4(199,105,186,0),24 }, + { IPv4(199,105,191,0),24 }, + { IPv4(199,105,192,0),20 }, + { IPv4(199,105,209,0),24 }, + { IPv4(199,106,0,0),15 }, + { IPv4(199,106,16,0),24 }, + { IPv4(199,106,17,0),24 }, + { IPv4(199,106,34,0),24 }, + { IPv4(199,106,35,0),24 }, + { IPv4(199,106,52,0),24 }, + { IPv4(199,106,56,0),24 }, + { IPv4(199,106,64,0),21 }, + { IPv4(199,106,65,0),24 }, + { IPv4(199,106,72,0),22 }, + { IPv4(199,106,78,0),24 }, + { IPv4(199,106,174,0),23 }, + { IPv4(199,106,176,0),23 }, + { IPv4(199,106,185,0),24 }, + { IPv4(199,106,186,0),24 }, + { IPv4(199,106,208,0),20 }, + { 
IPv4(199,106,232,0),21 }, + { IPv4(199,107,24,0),22 }, + { IPv4(199,107,26,0),23 }, + { IPv4(199,107,96,0),22 }, + { IPv4(199,107,144,0),24 }, + { IPv4(199,107,160,0),21 }, + { IPv4(199,108,0,0),16 }, + { IPv4(199,108,16,0),24 }, + { IPv4(199,108,40,0),24 }, + { IPv4(199,108,42,0),23 }, + { IPv4(199,108,44,0),22 }, + { IPv4(199,108,48,0),21 }, + { IPv4(199,108,56,0),23 }, + { IPv4(199,108,64,0),24 }, + { IPv4(199,108,66,0),24 }, + { IPv4(199,108,88,0),23 }, + { IPv4(199,108,160,0),21 }, + { IPv4(199,108,164,0),24 }, + { IPv4(199,108,167,0),24 }, + { IPv4(199,108,189,0),24 }, + { IPv4(199,108,224,0),22 }, + { IPv4(199,108,228,0),23 }, + { IPv4(199,109,32,0),22 }, + { IPv4(199,111,161,0),24 }, + { IPv4(199,112,0,0),19 }, + { IPv4(199,112,32,0),24 }, + { IPv4(199,112,36,0),24 }, + { IPv4(199,113,128,0),17 }, + { IPv4(199,114,0,0),21 }, + { IPv4(199,114,6,0),24 }, + { IPv4(199,114,8,0),22 }, + { IPv4(199,114,32,0),20 }, + { IPv4(199,114,48,0),22 }, + { IPv4(199,114,128,0),18 }, + { IPv4(199,115,8,0),21 }, + { IPv4(199,115,16,0),21 }, + { IPv4(199,115,24,0),23 }, + { IPv4(199,117,0,0),16 }, + { IPv4(199,117,144,0),22 }, + { IPv4(199,117,161,0),24 }, + { IPv4(199,119,33,0),24 }, + { IPv4(199,119,40,0),24 }, + { IPv4(199,120,16,0),20 }, + { IPv4(199,120,64,0),18 }, + { IPv4(199,120,79,0),24 }, + { IPv4(199,120,86,0),24 }, + { IPv4(199,120,153,0),24 }, + { IPv4(199,120,157,0),24 }, + { IPv4(199,120,161,0),24 }, + { IPv4(199,120,179,0),24 }, + { IPv4(199,120,183,0),24 }, + { IPv4(199,120,218,0),24 }, + { IPv4(199,120,249,0),24 }, + { IPv4(199,121,0,0),16 }, + { IPv4(199,121,31,0),24 }, + { IPv4(199,121,42,0),24 }, + { IPv4(199,121,124,0),24 }, + { IPv4(199,121,125,0),24 }, + { IPv4(199,121,131,0),24 }, + { IPv4(199,121,132,0),24 }, + { IPv4(199,121,155,0),24 }, + { IPv4(199,121,156,0),24 }, + { IPv4(199,121,157,0),24 }, + { IPv4(199,121,159,0),24 }, + { IPv4(199,121,160,0),24 }, + { IPv4(199,121,174,0),23 }, + { IPv4(199,121,185,0),24 }, + { IPv4(199,121,238,0),23 }, + { IPv4(199,121,240,0),24 }, + { IPv4(199,121,247,0),24 }, + { IPv4(199,122,4,0),23 }, + { IPv4(199,122,32,0),20 }, + { IPv4(199,122,49,0),24 }, + { IPv4(199,122,56,0),21 }, + { IPv4(199,123,16,0),20 }, + { IPv4(199,123,32,0),20 }, + { IPv4(199,123,71,0),24 }, + { IPv4(199,123,72,0),21 }, + { IPv4(199,123,80,0),21 }, + { IPv4(199,123,87,0),24 }, + { IPv4(199,123,88,0),23 }, + { IPv4(199,123,88,0),24 }, + { IPv4(199,123,89,0),24 }, + { IPv4(199,123,90,0),24 }, + { IPv4(199,123,92,0),24 }, + { IPv4(199,123,104,0),21 }, + { IPv4(199,123,112,0),21 }, + { IPv4(199,123,121,0),24 }, + { IPv4(199,123,122,0),23 }, + { IPv4(199,123,124,0),24 }, + { IPv4(199,124,8,0),21 }, + { IPv4(199,124,16,0),21 }, + { IPv4(199,125,8,0),24 }, + { IPv4(199,125,9,0),24 }, + { IPv4(199,125,10,0),24 }, + { IPv4(199,127,16,0),24 }, + { IPv4(199,127,25,0),24 }, + { IPv4(199,127,27,0),24 }, + { IPv4(199,127,32,0),21 }, + { IPv4(199,127,40,0),23 }, + { IPv4(199,131,64,0),19 }, + { IPv4(199,131,96,0),21 }, + { IPv4(199,131,104,0),22 }, + { IPv4(199,164,167,0),24 }, + { IPv4(199,164,176,0),23 }, + { IPv4(199,164,178,0),24 }, + { IPv4(199,164,180,0),23 }, + { IPv4(199,164,184,0),24 }, + { IPv4(199,164,185,0),24 }, + { IPv4(199,164,191,0),24 }, + { IPv4(199,164,192,0),24 }, + { IPv4(199,164,194,0),24 }, + { IPv4(199,164,200,0),24 }, + { IPv4(199,164,210,0),24 }, + { IPv4(199,164,214,0),24 }, + { IPv4(199,164,235,0),24 }, + { IPv4(199,164,236,0),24 }, + { IPv4(199,164,237,0),24 }, + { IPv4(199,164,250,0),24 }, + { IPv4(199,165,0,0),21 }, + { IPv4(199,165,8,0),22 }, + { 
IPv4(199,165,16,0),24 }, + { IPv4(199,165,16,0),20 }, + { IPv4(199,165,19,0),24 }, + { IPv4(199,165,80,0),21 }, + { IPv4(199,165,137,0),24 }, + { IPv4(199,165,138,0),24 }, + { IPv4(199,165,141,0),24 }, + { IPv4(199,165,150,0),24 }, + { IPv4(199,165,157,0),24 }, + { IPv4(199,165,165,0),24 }, + { IPv4(199,165,180,0),24 }, + { IPv4(199,165,206,0),24 }, + { IPv4(199,165,233,0),24 }, + { IPv4(199,165,247,0),24 }, + { IPv4(199,165,249,0),24 }, + { IPv4(199,165,250,0),24 }, + { IPv4(199,166,24,0),24 }, + { IPv4(199,166,25,0),24 }, + { IPv4(199,166,26,0),24 }, + { IPv4(199,166,27,0),24 }, + { IPv4(199,166,28,0),23 }, + { IPv4(199,166,30,0),24 }, + { IPv4(199,166,34,0),23 }, + { IPv4(199,166,36,0),23 }, + { IPv4(199,166,223,0),24 }, + { IPv4(199,168,32,0),24 }, + { IPv4(199,168,33,0),24 }, + { IPv4(199,168,35,0),24 }, + { IPv4(199,168,39,0),24 }, + { IPv4(199,169,208,0),20 }, + { IPv4(199,170,84,0),23 }, + { IPv4(199,170,88,0),21 }, + { IPv4(199,170,121,0),24 }, + { IPv4(199,170,132,0),24 }, + { IPv4(199,171,52,0),24 }, + { IPv4(199,171,96,0),24 }, + { IPv4(199,171,134,0),24 }, + { IPv4(199,171,200,0),24 }, + { IPv4(199,171,201,0),24 }, + { IPv4(199,172,169,0),24 }, + { IPv4(199,173,188,0),24 }, + { IPv4(199,173,232,0),22 }, + { IPv4(199,174,0,0),16 }, + { IPv4(199,174,0,0),18 }, + { IPv4(199,174,16,0),21 }, + { IPv4(199,174,136,0),24 }, + { IPv4(199,174,160,0),20 }, + { IPv4(199,174,176,0),21 }, + { IPv4(199,174,184,0),22 }, + { IPv4(199,174,196,0),22 }, + { IPv4(199,174,200,0),21 }, + { IPv4(199,174,208,0),20 }, + { IPv4(199,175,157,0),24 }, + { IPv4(199,175,219,0),24 }, + { IPv4(199,175,234,0),24 }, + { IPv4(199,177,58,0),24 }, + { IPv4(199,181,71,0),24 }, + { IPv4(199,181,92,0),22 }, + { IPv4(199,181,144,0),24 }, + { IPv4(199,181,150,0),24 }, + { IPv4(199,181,164,0),22 }, + { IPv4(199,181,168,0),24 }, + { IPv4(199,181,178,0),23 }, + { IPv4(199,181,179,0),24 }, + { IPv4(199,181,180,0),24 }, + { IPv4(199,181,193,0),24 }, + { IPv4(199,181,234,0),24 }, + { IPv4(199,181,237,0),24 }, + { IPv4(199,181,251,0),24 }, + { IPv4(199,181,252,0),24 }, + { IPv4(199,182,0,0),15 }, + { IPv4(199,182,110,0),24 }, + { IPv4(199,182,207,0),24 }, + { IPv4(199,182,227,0),24 }, + { IPv4(199,182,248,0),23 }, + { IPv4(199,182,250,0),24 }, + { IPv4(199,183,4,0),24 }, + { IPv4(199,183,32,0),24 }, + { IPv4(199,183,38,0),24 }, + { IPv4(199,183,44,0),24 }, + { IPv4(199,183,185,0),24 }, + { IPv4(199,183,186,0),24 }, + { IPv4(199,184,16,0),20 }, + { IPv4(199,184,75,0),24 }, + { IPv4(199,184,82,0),24 }, + { IPv4(199,184,120,0),22 }, + { IPv4(199,184,162,0),24 }, + { IPv4(199,184,165,0),24 }, + { IPv4(199,184,210,0),24 }, + { IPv4(199,184,226,0),24 }, + { IPv4(199,184,227,0),24 }, + { IPv4(199,184,228,0),24 }, + { IPv4(199,184,236,0),23 }, + { IPv4(199,184,238,0),24 }, + { IPv4(199,184,241,0),24 }, + { IPv4(199,184,243,0),24 }, + { IPv4(199,184,244,0),24 }, + { IPv4(199,184,252,0),24 }, + { IPv4(199,185,1,0),24 }, + { IPv4(199,185,4,0),24 }, + { IPv4(199,185,8,0),23 }, + { IPv4(199,185,104,0),24 }, + { IPv4(199,185,109,0),24 }, + { IPv4(199,185,110,0),24 }, + { IPv4(199,185,116,0),24 }, + { IPv4(199,185,117,0),24 }, + { IPv4(199,185,136,0),23 }, + { IPv4(199,185,230,0),23 }, + { IPv4(199,186,0,0),16 }, + { IPv4(199,189,0,0),24 }, + { IPv4(199,189,1,0),24 }, + { IPv4(199,189,2,0),24 }, + { IPv4(199,189,3,0),24 }, + { IPv4(199,189,4,0),24 }, + { IPv4(199,189,5,0),24 }, + { IPv4(199,189,8,0),21 }, + { IPv4(199,190,64,0),18 }, + { IPv4(199,190,65,0),24 }, + { IPv4(199,190,87,0),24 }, + { IPv4(199,190,99,0),24 }, + { 
IPv4(199,190,100,0),24 }, + { IPv4(199,190,104,0),24 }, + { IPv4(199,190,116,0),24 }, + { IPv4(199,190,134,0),24 }, + { IPv4(199,190,161,0),24 }, + { IPv4(199,190,174,0),24 }, + { IPv4(199,190,175,0),24 }, + { IPv4(199,190,178,0),23 }, + { IPv4(199,190,180,0),24 }, + { IPv4(199,190,182,0),24 }, + { IPv4(199,190,198,0),23 }, + { IPv4(199,190,200,0),24 }, + { IPv4(199,190,201,0),24 }, + { IPv4(199,190,209,0),24 }, + { IPv4(199,190,222,0),23 }, + { IPv4(199,190,224,0),23 }, + { IPv4(199,190,227,0),24 }, + { IPv4(199,190,244,0),24 }, + { IPv4(199,190,245,0),24 }, + { IPv4(199,190,246,0),24 }, + { IPv4(199,190,247,0),24 }, + { IPv4(199,190,248,0),24 }, + { IPv4(199,191,0,0),20 }, + { IPv4(199,191,32,0),24 }, + { IPv4(199,191,33,0),24 }, + { IPv4(199,191,34,0),24 }, + { IPv4(199,191,35,0),24 }, + { IPv4(199,191,36,0),24 }, + { IPv4(199,191,37,0),24 }, + { IPv4(199,191,128,0),21 }, + { IPv4(199,191,128,0),22 }, + { IPv4(199,191,136,0),21 }, + { IPv4(199,191,144,0),22 }, + { IPv4(199,191,144,0),21 }, + { IPv4(199,191,152,0),21 }, + { IPv4(199,191,160,0),24 }, + { IPv4(199,191,192,0),24 }, + { IPv4(199,191,200,0),24 }, + { IPv4(199,191,208,0),24 }, + { IPv4(199,192,0,0),21 }, + { IPv4(199,192,8,0),22 }, + { IPv4(199,195,64,0),19 }, + { IPv4(199,195,112,0),23 }, + { IPv4(199,196,54,0),24 }, + { IPv4(199,197,0,0),21 }, + { IPv4(199,197,8,0),22 }, + { IPv4(199,198,129,0),24 }, + { IPv4(199,199,0,0),16 }, + { IPv4(199,199,32,0),19 }, + { IPv4(199,199,70,0),24 }, + { IPv4(199,199,118,0),23 }, + { IPv4(199,199,120,0),21 }, + { IPv4(199,199,220,0),24 }, + { IPv4(199,200,9,0),24 }, + { IPv4(199,200,128,0),24 }, + { IPv4(199,201,0,0),16 }, + { IPv4(199,201,6,0),24 }, + { IPv4(199,201,16,0),22 }, + { IPv4(199,201,18,0),24 }, + { IPv4(199,201,20,0),24 }, + { IPv4(199,201,27,0),24 }, + { IPv4(199,201,128,0),24 }, + { IPv4(199,201,129,0),24 }, + { IPv4(199,201,140,0),24 }, + { IPv4(199,201,143,0),24 }, + { IPv4(199,201,145,0),24 }, + { IPv4(199,201,147,0),24 }, + { IPv4(199,201,153,0),24 }, + { IPv4(199,201,154,0),24 }, + { IPv4(199,201,156,0),24 }, + { IPv4(199,201,157,0),24 }, + { IPv4(199,201,158,0),24 }, + { IPv4(199,201,175,0),24 }, + { IPv4(199,201,181,0),24 }, + { IPv4(199,201,192,0),24 }, + { IPv4(199,201,213,0),24 }, + { IPv4(199,201,223,0),24 }, + { IPv4(199,201,231,0),24 }, + { IPv4(199,201,232,0),22 }, + { IPv4(199,201,236,0),24 }, + { IPv4(199,201,237,0),24 }, + { IPv4(199,201,248,0),24 }, + { IPv4(199,201,248,0),23 }, + { IPv4(199,201,249,0),24 }, + { IPv4(199,202,64,0),24 }, + { IPv4(199,205,1,0),24 }, + { IPv4(199,208,0,0),20 }, + { IPv4(199,208,1,0),24 }, + { IPv4(199,208,4,0),24 }, + { IPv4(199,208,5,0),24 }, + { IPv4(199,208,6,0),24 }, + { IPv4(199,208,7,0),24 }, + { IPv4(199,208,16,0),24 }, + { IPv4(199,208,19,0),24 }, + { IPv4(199,208,20,0),24 }, + { IPv4(199,208,21,0),24 }, + { IPv4(199,208,22,0),24 }, + { IPv4(199,208,23,0),24 }, + { IPv4(199,208,25,0),24 }, + { IPv4(199,208,64,0),18 }, + { IPv4(199,208,88,0),23 }, + { IPv4(199,208,91,0),24 }, + { IPv4(199,208,92,0),24 }, + { IPv4(199,208,110,0),24 }, + { IPv4(199,208,128,0),18 }, + { IPv4(199,208,157,0),24 }, + { IPv4(199,208,158,0),24 }, + { IPv4(199,208,172,0),24 }, + { IPv4(199,208,189,0),24 }, + { IPv4(199,208,193,0),24 }, + { IPv4(199,208,194,0),23 }, + { IPv4(199,208,197,0),24 }, + { IPv4(199,208,200,0),22 }, + { IPv4(199,208,213,0),24 }, + { IPv4(199,208,224,0),19 }, + { IPv4(199,208,242,0),24 }, + { IPv4(199,208,247,0),24 }, + { IPv4(199,208,248,0),24 }, + { IPv4(199,208,249,0),24 }, + { IPv4(199,209,0,0),23 }, + { 
IPv4(199,209,8,0),22 }, + { IPv4(199,209,12,0),24 }, + { IPv4(199,209,16,0),22 }, + { IPv4(199,209,20,0),23 }, + { IPv4(199,209,22,0),24 }, + { IPv4(199,209,32,0),19 }, + { IPv4(199,209,33,0),24 }, + { IPv4(199,209,38,0),24 }, + { IPv4(199,209,39,0),24 }, + { IPv4(199,209,64,0),19 }, + { IPv4(199,209,96,0),24 }, + { IPv4(199,209,98,0),24 }, + { IPv4(199,209,99,0),24 }, + { IPv4(199,209,128,0),17 }, + { IPv4(199,210,0,0),16 }, + { IPv4(199,211,39,0),24 }, + { IPv4(199,211,64,0),18 }, + { IPv4(199,211,65,0),24 }, + { IPv4(199,211,81,0),24 }, + { IPv4(199,211,89,0),24 }, + { IPv4(199,211,100,0),24 }, + { IPv4(199,211,117,0),24 }, + { IPv4(199,211,118,0),23 }, + { IPv4(199,211,120,0),24 }, + { IPv4(199,211,121,0),24 }, + { IPv4(199,211,122,0),24 }, + { IPv4(199,211,128,0),17 }, + { IPv4(199,211,128,0),23 }, + { IPv4(199,211,131,0),24 }, + { IPv4(199,211,134,0),24 }, + { IPv4(199,211,150,0),24 }, + { IPv4(199,211,157,0),24 }, + { IPv4(199,211,158,0),23 }, + { IPv4(199,211,160,0),24 }, + { IPv4(199,211,162,0),24 }, + { IPv4(199,211,163,0),24 }, + { IPv4(199,211,172,0),22 }, + { IPv4(199,211,180,0),24 }, + { IPv4(199,211,181,0),24 }, + { IPv4(199,211,182,0),24 }, + { IPv4(199,211,183,0),24 }, + { IPv4(199,211,192,0),23 }, + { IPv4(199,211,197,0),24 }, + { IPv4(199,211,198,0),24 }, + { IPv4(199,211,199,0),24 }, + { IPv4(199,211,211,0),24 }, + { IPv4(199,211,214,0),24 }, + { IPv4(199,211,219,0),24 }, + { IPv4(199,211,220,0),24 }, + { IPv4(199,211,225,0),24 }, + { IPv4(199,211,226,0),24 }, + { IPv4(199,211,228,0),24 }, + { IPv4(199,211,253,0),24 }, + { IPv4(199,212,18,0),24 }, + { IPv4(199,212,45,0),24 }, + { IPv4(199,212,53,0),24 }, + { IPv4(199,212,63,0),24 }, + { IPv4(199,212,120,0),22 }, + { IPv4(199,212,129,0),24 }, + { IPv4(199,212,132,0),24 }, + { IPv4(199,212,134,0),24 }, + { IPv4(199,212,135,0),24 }, + { IPv4(199,212,144,0),24 }, + { IPv4(199,212,150,0),24 }, + { IPv4(199,212,200,0),24 }, + { IPv4(199,212,215,0),24 }, + { IPv4(199,212,232,0),24 }, + { IPv4(199,216,250,0),23 }, + { IPv4(199,217,8,0),21 }, + { IPv4(199,217,128,0),17 }, + { IPv4(199,217,157,0),24 }, + { IPv4(199,217,214,0),23 }, + { IPv4(199,217,217,0),24 }, + { IPv4(199,217,220,0),24 }, + { IPv4(199,219,5,0),24 }, + { IPv4(199,219,128,0),18 }, + { IPv4(199,219,192,0),20 }, + { IPv4(199,219,208,0),21 }, + { IPv4(199,219,216,0),24 }, + { IPv4(199,221,65,0),24 }, + { IPv4(199,221,224,0),19 }, + { IPv4(199,222,4,0),24 }, + { IPv4(199,222,33,0),24 }, + { IPv4(199,222,100,0),24 }, + { IPv4(199,222,128,0),20 }, + { IPv4(199,222,141,0),24 }, + { IPv4(199,222,160,0),20 }, + { IPv4(199,223,139,0),24 }, + { IPv4(199,223,145,0),24 }, + { IPv4(199,223,148,0),24 }, + { IPv4(199,223,149,0),24 }, + { IPv4(199,223,155,0),24 }, + { IPv4(199,223,156,0),24 }, + { IPv4(199,223,178,0),24 }, + { IPv4(199,223,180,0),24 }, + { IPv4(199,224,0,0),20 }, + { IPv4(199,224,64,0),18 }, + { IPv4(199,225,0,0),20 }, + { IPv4(199,226,4,0),22 }, + { IPv4(199,226,8,0),21 }, + { IPv4(199,226,16,0),21 }, + { IPv4(199,226,64,0),19 }, + { IPv4(199,226,84,0),22 }, + { IPv4(199,226,96,0),20 }, + { IPv4(199,226,112,0),21 }, + { IPv4(199,226,120,0),22 }, + { IPv4(199,226,124,0),22 }, + { IPv4(199,226,156,0),24 }, + { IPv4(199,227,0,0),24 }, + { IPv4(199,227,0,0),16 }, + { IPv4(199,227,52,0),23 }, + { IPv4(199,227,56,0),23 }, + { IPv4(199,227,72,0),24 }, + { IPv4(199,227,100,0),23 }, + { IPv4(199,227,103,0),24 }, + { IPv4(199,227,115,0),24 }, + { IPv4(199,227,118,0),23 }, + { IPv4(199,227,120,0),23 }, + { IPv4(199,227,124,0),24 }, + { IPv4(199,227,127,0),24 }, + { 
IPv4(199,227,141,0),24 }, + { IPv4(199,227,153,0),24 }, + { IPv4(199,227,154,0),24 }, + { IPv4(199,227,158,0),24 }, + { IPv4(199,227,190,0),23 }, + { IPv4(199,227,208,0),23 }, + { IPv4(199,228,181,0),24 }, + { IPv4(199,229,1,0),24 }, + { IPv4(199,229,9,0),24 }, + { IPv4(199,229,10,0),24 }, + { IPv4(199,229,12,0),24 }, + { IPv4(199,229,13,0),24 }, + { IPv4(199,229,14,0),24 }, + { IPv4(199,229,20,0),24 }, + { IPv4(199,229,69,0),24 }, + { IPv4(199,229,97,0),24 }, + { IPv4(199,229,103,0),24 }, + { IPv4(199,229,115,0),24 }, + { IPv4(199,230,16,0),21 }, + { IPv4(199,230,26,0),24 }, + { IPv4(199,230,29,0),24 }, + { IPv4(199,230,128,0),24 }, + { IPv4(199,230,129,0),24 }, + { IPv4(199,230,130,0),24 }, + { IPv4(199,230,249,0),24 }, + { IPv4(199,231,48,0),24 }, + { IPv4(199,231,49,0),24 }, + { IPv4(199,231,50,0),24 }, + { IPv4(199,231,51,0),24 }, + { IPv4(199,232,0,0),16 }, + { IPv4(199,232,2,0),24 }, + { IPv4(199,232,56,0),21 }, + { IPv4(199,232,74,104),30 }, + { IPv4(199,232,92,0),22 }, + { IPv4(199,232,131,0),24 }, + { IPv4(199,232,132,0),23 }, + { IPv4(199,233,74,0),24 }, + { IPv4(199,233,77,0),24 }, + { IPv4(199,233,81,0),24 }, + { IPv4(199,233,82,0),24 }, + { IPv4(199,233,87,0),24 }, + { IPv4(199,233,92,0),24 }, + { IPv4(199,233,97,0),24 }, + { IPv4(199,233,98,0),24 }, + { IPv4(199,233,99,0),24 }, + { IPv4(199,233,111,0),24 }, + { IPv4(199,233,130,0),24 }, + { IPv4(199,233,134,0),24 }, + { IPv4(199,233,135,0),24 }, + { IPv4(199,233,154,0),24 }, + { IPv4(199,233,155,0),24 }, + { IPv4(199,233,182,0),24 }, + { IPv4(199,233,185,0),24 }, + { IPv4(199,234,0,0),16 }, + { IPv4(199,234,146,0),24 }, + { IPv4(199,234,225,0),24 }, + { IPv4(199,234,227,0),24 }, + { IPv4(199,236,0,0),14 }, + { IPv4(199,236,212,0),23 }, + { IPv4(199,237,32,0),23 }, + { IPv4(199,240,0,0),16 }, + { IPv4(199,240,130,0),23 }, + { IPv4(199,240,134,0),24 }, + { IPv4(199,240,142,0),23 }, + { IPv4(199,240,170,0),23 }, + { IPv4(199,240,175,0),24 }, + { IPv4(199,240,176,0),23 }, + { IPv4(199,240,226,0),24 }, + { IPv4(199,242,7,0),24 }, + { IPv4(199,242,138,0),24 }, + { IPv4(199,242,141,0),24 }, + { IPv4(199,242,169,0),24 }, + { IPv4(199,242,201,0),24 }, + { IPv4(199,242,206,0),24 }, + { IPv4(199,242,211,0),24 }, + { IPv4(199,242,223,0),24 }, + { IPv4(199,242,244,0),24 }, + { IPv4(199,244,33,0),24 }, + { IPv4(199,244,35,0),24 }, + { IPv4(199,244,192,0),22 }, + { IPv4(199,244,196,0),23 }, + { IPv4(199,244,223,0),24 }, + { IPv4(199,245,16,0),20 }, + { IPv4(199,245,32,0),24 }, + { IPv4(199,245,64,0),24 }, + { IPv4(199,245,81,0),24 }, + { IPv4(199,245,82,0),24 }, + { IPv4(199,245,86,0),24 }, + { IPv4(199,245,87,0),24 }, + { IPv4(199,245,89,0),24 }, + { IPv4(199,245,95,0),24 }, + { IPv4(199,245,96,0),24 }, + { IPv4(199,245,97,0),24 }, + { IPv4(199,245,103,0),24 }, + { IPv4(199,245,110,0),24 }, + { IPv4(199,245,114,0),23 }, + { IPv4(199,245,118,0),23 }, + { IPv4(199,245,120,0),23 }, + { IPv4(199,245,131,0),24 }, + { IPv4(199,245,134,0),23 }, + { IPv4(199,245,140,0),24 }, + { IPv4(199,245,155,0),24 }, + { IPv4(199,245,156,0),24 }, + { IPv4(199,245,157,0),24 }, + { IPv4(199,245,172,0),24 }, + { IPv4(199,245,173,0),24 }, + { IPv4(199,245,177,0),24 }, + { IPv4(199,245,206,0),24 }, + { IPv4(199,245,225,0),24 }, + { IPv4(199,245,242,0),24 }, + { IPv4(199,245,244,0),23 }, + { IPv4(199,246,2,0),24 }, + { IPv4(199,246,3,0),24 }, + { IPv4(199,246,7,0),24 }, + { IPv4(199,246,26,0),24 }, + { IPv4(199,246,36,0),24 }, + { IPv4(199,246,37,0),24 }, + { IPv4(199,246,52,0),22 }, + { IPv4(199,246,56,0),23 }, + { IPv4(199,246,67,0),24 }, + { 
IPv4(199,246,68,0),24 }, + { IPv4(199,246,107,0),24 }, + { IPv4(199,246,138,0),24 }, + { IPv4(199,246,218,0),23 }, + { IPv4(199,246,220,0),23 }, + { IPv4(199,246,230,0),24 }, + { IPv4(199,246,231,0),24 }, + { IPv4(199,246,232,0),24 }, + { IPv4(199,246,233,0),24 }, + { IPv4(199,246,234,0),24 }, + { IPv4(199,246,235,0),24 }, + { IPv4(199,246,236,0),24 }, + { IPv4(199,246,237,0),24 }, + { IPv4(199,246,238,0),24 }, + { IPv4(199,246,239,0),24 }, + { IPv4(199,246,240,0),24 }, + { IPv4(199,246,241,0),24 }, + { IPv4(199,246,242,0),24 }, + { IPv4(199,246,243,0),24 }, + { IPv4(199,246,244,0),24 }, + { IPv4(199,246,245,0),24 }, + { IPv4(199,246,246,0),24 }, + { IPv4(199,246,247,0),24 }, + { IPv4(199,246,248,0),24 }, + { IPv4(199,246,249,0),24 }, + { IPv4(199,246,250,0),24 }, + { IPv4(199,246,251,0),24 }, + { IPv4(199,246,252,0),24 }, + { IPv4(199,246,253,0),24 }, + { IPv4(199,247,254,0),24 }, + { IPv4(199,248,148,0),22 }, + { IPv4(199,248,170,0),24 }, + { IPv4(199,248,180,0),24 }, + { IPv4(199,248,185,0),24 }, + { IPv4(199,248,201,0),24 }, + { IPv4(199,248,228,0),24 }, + { IPv4(199,248,238,0),24 }, + { IPv4(199,248,240,0),24 }, + { IPv4(199,248,245,0),24 }, + { IPv4(199,248,255,0),24 }, + { IPv4(199,249,136,0),24 }, + { IPv4(199,249,137,0),24 }, + { IPv4(199,249,138,0),24 }, + { IPv4(199,249,150,0),24 }, + { IPv4(199,249,163,0),24 }, + { IPv4(199,249,169,0),24 }, + { IPv4(199,249,180,0),24 }, + { IPv4(199,249,185,0),24 }, + { IPv4(199,249,191,0),24 }, + { IPv4(199,249,198,0),24 }, + { IPv4(199,249,199,0),24 }, + { IPv4(199,249,200,0),23 }, + { IPv4(199,249,223,0),24 }, + { IPv4(199,249,229,0),24 }, + { IPv4(199,250,8,0),24 }, + { IPv4(199,250,13,0),24 }, + { IPv4(199,250,70,0),23 }, + { IPv4(199,250,136,0),24 }, + { IPv4(199,250,137,0),24 }, + { IPv4(199,250,138,0),24 }, + { IPv4(199,250,139,0),24 }, + { IPv4(199,250,140,0),24 }, + { IPv4(199,250,141,0),24 }, + { IPv4(199,250,142,0),24 }, + { IPv4(199,250,143,0),24 }, + { IPv4(199,250,181,0),24 }, + { IPv4(199,251,0,0),16 }, + { IPv4(199,251,27,0),24 }, + { IPv4(199,251,88,0),24 }, + { IPv4(199,251,89,0),24 }, + { IPv4(199,251,187,0),24 }, + { IPv4(199,251,188,0),24 }, + { IPv4(199,251,189,0),24 }, + { IPv4(199,251,217,0),24 }, + { IPv4(199,251,218,0),24 }, + { IPv4(199,251,219,0),24 }, + { IPv4(199,252,8,0),22 }, + { IPv4(199,252,12,0),24 }, + { IPv4(199,252,16,0),21 }, + { IPv4(199,252,24,0),24 }, + { IPv4(199,252,32,0),20 }, + { IPv4(199,252,48,0),22 }, + { IPv4(199,252,128,0),24 }, + { IPv4(199,252,128,0),18 }, + { IPv4(199,252,131,0),24 }, + { IPv4(199,252,132,0),24 }, + { IPv4(199,252,137,0),24 }, + { IPv4(199,252,138,0),24 }, + { IPv4(199,252,139,0),24 }, + { IPv4(199,252,142,0),24 }, + { IPv4(199,252,153,0),24 }, + { IPv4(199,252,155,0),24 }, + { IPv4(199,252,156,0),24 }, + { IPv4(199,252,180,0),24 }, + { IPv4(199,253,32,0),20 }, + { IPv4(199,253,48,0),21 }, + { IPv4(199,253,174,0),24 }, + { IPv4(199,253,200,0),22 }, + { IPv4(199,253,200,0),21 }, + { IPv4(199,253,246,0),24 }, + { IPv4(199,253,248,0),24 }, + { IPv4(199,254,8,0),21 }, + { IPv4(199,254,138,0),24 }, + { IPv4(199,254,154,0),24 }, + { IPv4(199,254,168,0),24 }, + { IPv4(199,254,169,0),24 }, + { IPv4(199,254,179,0),24 }, + { IPv4(199,254,188,0),24 }, + { IPv4(199,254,199,0),24 }, + { IPv4(199,254,202,0),24 }, + { IPv4(199,254,213,0),24 }, + { IPv4(199,254,225,0),24 }, + { IPv4(199,254,229,0),24 }, + { IPv4(200,0,103,0),24 }, + { IPv4(200,0,104,0),24 }, + { IPv4(200,0,105,0),24 }, + { IPv4(200,0,106,0),24 }, + { IPv4(200,0,111,0),24 }, + { IPv4(200,0,112,0),24 }, + { 
IPv4(200,0,113,0),24 }, + { IPv4(200,0,147,0),24 }, + { IPv4(200,0,155,0),24 }, + { IPv4(200,0,156,0),24 }, + { IPv4(200,0,157,0),24 }, + { IPv4(200,0,158,0),24 }, + { IPv4(200,0,159,0),24 }, + { IPv4(200,0,181,0),24 }, + { IPv4(200,0,182,0),24 }, + { IPv4(200,0,182,0),23 }, + { IPv4(200,0,183,0),24 }, + { IPv4(200,0,187,0),24 }, + { IPv4(200,0,193,0),24 }, + { IPv4(200,0,216,0),21 }, + { IPv4(200,0,224,0),19 }, + { IPv4(200,1,32,0),19 }, + { IPv4(200,1,128,0),24 }, + { IPv4(200,1,143,0),24 }, + { IPv4(200,1,152,0),24 }, + { IPv4(200,1,156,0),22 }, + { IPv4(200,1,161,0),24 }, + { IPv4(200,2,91,0),24 }, + { IPv4(200,3,32,0),22 }, + { IPv4(200,3,60,0),22 }, + { IPv4(200,3,67,0),24 }, + { IPv4(200,3,94,0),24 }, + { IPv4(200,3,95,0),24 }, + { IPv4(200,3,120,0),21 }, + { IPv4(200,3,240,0),24 }, + { IPv4(200,4,0,0),21 }, + { IPv4(200,4,8,0),24 }, + { IPv4(200,4,14,0),24 }, + { IPv4(200,4,15,0),24 }, + { IPv4(200,4,48,0),22 }, + { IPv4(200,4,57,0),24 }, + { IPv4(200,4,96,0),19 }, + { IPv4(200,4,128,0),20 }, + { IPv4(200,4,144,0),24 }, + { IPv4(200,4,146,0),24 }, + { IPv4(200,4,147,0),24 }, + { IPv4(200,4,148,0),24 }, + { IPv4(200,4,149,0),24 }, + { IPv4(200,4,150,0),24 }, + { IPv4(200,4,151,0),24 }, + { IPv4(200,4,152,0),24 }, + { IPv4(200,4,153,0),24 }, + { IPv4(200,4,154,0),24 }, + { IPv4(200,4,155,0),24 }, + { IPv4(200,4,156,0),24 }, + { IPv4(200,4,157,0),24 }, + { IPv4(200,4,159,0),24 }, + { IPv4(200,5,32,0),21 }, + { IPv4(200,5,64,0),18 }, + { IPv4(200,5,192,0),18 }, + { IPv4(200,6,65,0),24 }, + { IPv4(200,6,66,0),24 }, + { IPv4(200,6,73,0),24 }, + { IPv4(200,6,74,0),24 }, + { IPv4(200,6,77,0),24 }, + { IPv4(200,9,145,0),24 }, + { IPv4(200,9,146,0),24 }, + { IPv4(200,9,212,0),23 }, + { IPv4(200,9,219,0),24 }, + { IPv4(200,9,235,0),24 }, + { IPv4(200,9,237,0),24 }, + { IPv4(200,10,96,0),22 }, + { IPv4(200,10,122,0),24 }, + { IPv4(200,10,128,0),24 }, + { IPv4(200,10,143,0),24 }, + { IPv4(200,10,186,0),24 }, + { IPv4(200,10,207,0),24 }, + { IPv4(200,10,233,0),24 }, + { IPv4(200,10,241,0),24 }, + { IPv4(200,10,243,0),24 }, + { IPv4(200,11,34,0),24 }, + { IPv4(200,12,25,0),24 }, + { IPv4(200,12,32,0),20 }, + { IPv4(200,12,56,0),21 }, + { IPv4(200,12,64,0),24 }, + { IPv4(200,12,65,0),24 }, + { IPv4(200,12,66,0),24 }, + { IPv4(200,12,67,0),24 }, + { IPv4(200,12,69,0),24 }, + { IPv4(200,12,71,0),24 }, + { IPv4(200,12,78,0),24 }, + { IPv4(200,12,84,0),24 }, + { IPv4(200,12,88,0),24 }, + { IPv4(200,12,95,0),24 }, + { IPv4(200,12,126,0),24 }, + { IPv4(200,12,127,0),24 }, + { IPv4(200,12,158,0),23 }, + { IPv4(200,12,164,0),24 }, + { IPv4(200,12,166,0),24 }, + { IPv4(200,12,193,0),24 }, + { IPv4(200,12,224,0),20 }, + { IPv4(200,13,52,0),24 }, + { IPv4(200,13,53,0),24 }, + { IPv4(200,13,54,0),23 }, + { IPv4(200,13,88,0),24 }, + { IPv4(200,13,110,0),24 }, + { IPv4(200,13,111,0),24 }, + { IPv4(200,13,113,0),24 }, + { IPv4(200,13,116,0),24 }, + { IPv4(200,14,114,0),24 }, + { IPv4(200,14,115,0),24 }, + { IPv4(200,14,192,0),24 }, + { IPv4(200,14,205,0),24 }, + { IPv4(200,14,206,0),24 }, + { IPv4(200,14,207,0),24 }, + { IPv4(200,14,208,0),24 }, + { IPv4(200,14,209,0),24 }, + { IPv4(200,14,210,0),24 }, + { IPv4(200,14,211,0),24 }, + { IPv4(200,14,215,0),24 }, + { IPv4(200,14,221,0),24 }, + { IPv4(200,14,231,0),24 }, + { IPv4(200,14,232,0),24 }, + { IPv4(200,14,233,0),24 }, + { IPv4(200,14,234,0),24 }, + { IPv4(200,14,236,0),24 }, + { IPv4(200,14,237,0),24 }, + { IPv4(200,14,238,0),24 }, + { IPv4(200,14,239,0),24 }, + { IPv4(200,14,241,0),24 }, + { IPv4(200,14,242,0),23 }, + { IPv4(200,14,244,0),24 }, + { 
IPv4(200,14,253,0),24 }, + { IPv4(200,15,0,0),16 }, + { IPv4(200,15,28,0),24 }, + { IPv4(200,16,32,0),20 }, + { IPv4(200,16,35,0),24 }, + { IPv4(200,16,48,0),20 }, + { IPv4(200,16,86,0),24 }, + { IPv4(200,16,96,0),20 }, + { IPv4(200,16,162,0),23 }, + { IPv4(200,16,170,0),24 }, + { IPv4(200,16,195,0),24 }, + { IPv4(200,16,199,0),24 }, + { IPv4(200,16,206,0),23 }, + { IPv4(200,16,209,0),24 }, + { IPv4(200,16,210,0),23 }, + { IPv4(200,16,246,0),24 }, + { IPv4(200,23,1,0),24 }, + { IPv4(200,23,3,0),24 }, + { IPv4(200,23,5,0),24 }, + { IPv4(200,23,16,0),24 }, + { IPv4(200,23,17,0),24 }, + { IPv4(200,23,21,0),24 }, + { IPv4(200,23,22,0),24 }, + { IPv4(200,23,23,0),24 }, + { IPv4(200,23,35,0),24 }, + { IPv4(200,23,36,0),23 }, + { IPv4(200,23,40,0),24 }, + { IPv4(200,23,41,0),24 }, + { IPv4(200,23,42,0),24 }, + { IPv4(200,23,43,0),24 }, + { IPv4(200,23,63,0),24 }, + { IPv4(200,23,64,0),24 }, + { IPv4(200,23,66,0),24 }, + { IPv4(200,23,74,0),24 }, + { IPv4(200,23,76,0),24 }, + { IPv4(200,23,77,0),24 }, + { IPv4(200,23,78,0),24 }, + { IPv4(200,23,79,0),24 }, + { IPv4(200,23,80,0),24 }, + { IPv4(200,23,83,0),24 }, + { IPv4(200,23,84,0),24 }, + { IPv4(200,23,87,0),24 }, + { IPv4(200,23,91,0),24 }, + { IPv4(200,23,96,0),24 }, + { IPv4(200,23,100,0),24 }, + { IPv4(200,23,101,0),24 }, + { IPv4(200,23,103,0),24 }, + { IPv4(200,23,105,0),24 }, + { IPv4(200,23,108,0),24 }, + { IPv4(200,23,109,0),24 }, + { IPv4(200,23,110,0),24 }, + { IPv4(200,23,120,0),24 }, + { IPv4(200,23,128,0),24 }, + { IPv4(200,23,129,0),24 }, + { IPv4(200,23,130,0),24 }, + { IPv4(200,23,132,0),24 }, + { IPv4(200,23,134,0),24 }, + { IPv4(200,23,135,0),24 }, + { IPv4(200,23,136,0),24 }, + { IPv4(200,23,140,0),24 }, + { IPv4(200,23,144,0),24 }, + { IPv4(200,23,145,0),24 }, + { IPv4(200,23,146,0),24 }, + { IPv4(200,23,148,0),24 }, + { IPv4(200,23,149,0),24 }, + { IPv4(200,23,150,0),24 }, + { IPv4(200,23,156,0),24 }, + { IPv4(200,23,159,0),24 }, + { IPv4(200,23,160,0),22 }, + { IPv4(200,23,164,0),24 }, + { IPv4(200,23,166,0),24 }, + { IPv4(200,23,172,0),24 }, + { IPv4(200,23,176,0),24 }, + { IPv4(200,23,177,0),24 }, + { IPv4(200,23,178,0),24 }, + { IPv4(200,23,192,0),22 }, + { IPv4(200,23,196,0),24 }, + { IPv4(200,23,197,0),24 }, + { IPv4(200,23,208,0),24 }, + { IPv4(200,23,209,0),24 }, + { IPv4(200,23,210,0),24 }, + { IPv4(200,23,210,0),23 }, + { IPv4(200,23,211,0),24 }, + { IPv4(200,23,214,0),24 }, + { IPv4(200,23,217,0),24 }, + { IPv4(200,23,219,0),24 }, + { IPv4(200,23,240,0),24 }, + { IPv4(200,23,240,0),21 }, + { IPv4(200,23,241,0),24 }, + { IPv4(200,23,242,0),24 }, + { IPv4(200,23,243,0),24 }, + { IPv4(200,23,245,0),24 }, + { IPv4(200,23,247,0),24 }, + { IPv4(200,23,248,0),24 }, + { IPv4(200,23,249,0),24 }, + { IPv4(200,23,250,0),24 }, + { IPv4(200,23,251,0),24 }, + { IPv4(200,23,252,0),24 }, + { IPv4(200,23,253,0),24 }, + { IPv4(200,24,160,0),20 }, + { IPv4(200,24,176,0),24 }, + { IPv4(200,24,177,0),24 }, + { IPv4(200,24,178,0),23 }, + { IPv4(200,24,180,0),22 }, + { IPv4(200,24,184,0),21 }, + { IPv4(200,24,208,0),20 }, + { IPv4(200,26,26,0),24 }, + { IPv4(200,26,33,0),24 }, + { IPv4(200,26,48,0),20 }, + { IPv4(200,26,64,0),18 }, + { IPv4(200,27,0,0),16 }, + { IPv4(200,27,2,0),24 }, + { IPv4(200,27,9,0),24 }, + { IPv4(200,27,60,0),24 }, + { IPv4(200,27,61,0),24 }, + { IPv4(200,27,63,0),24 }, + { IPv4(200,27,64,0),24 }, + { IPv4(200,27,64,0),19 }, + { IPv4(200,27,66,0),24 }, + { IPv4(200,27,67,0),24 }, + { IPv4(200,27,73,0),24 }, + { IPv4(200,27,96,0),19 }, + { IPv4(200,27,99,0),24 }, + { IPv4(200,27,109,0),24 }, + { 
IPv4(200,27,160,0),19 }, + { IPv4(200,27,192,0),19 }, + { IPv4(200,27,198,0),24 }, + { IPv4(200,27,200,0),24 }, + { IPv4(200,27,201,0),24 }, + { IPv4(200,27,202,0),24 }, + { IPv4(200,27,203,0),24 }, + { IPv4(200,27,204,0),24 }, + { IPv4(200,27,205,0),24 }, + { IPv4(200,27,224,0),19 }, + { IPv4(200,28,152,0),24 }, + { IPv4(200,30,128,0),24 }, + { IPv4(200,30,129,0),24 }, + { IPv4(200,30,192,0),24 }, + { IPv4(200,30,193,0),24 }, + { IPv4(200,30,194,0),24 }, + { IPv4(200,30,195,0),24 }, + { IPv4(200,30,196,0),24 }, + { IPv4(200,30,197,0),24 }, + { IPv4(200,30,198,0),24 }, + { IPv4(200,30,199,0),24 }, + { IPv4(200,30,200,0),24 }, + { IPv4(200,30,201,0),24 }, + { IPv4(200,30,202,0),24 }, + { IPv4(200,30,203,0),24 }, + { IPv4(200,30,204,0),24 }, + { IPv4(200,30,205,0),24 }, + { IPv4(200,30,206,0),24 }, + { IPv4(200,30,207,0),24 }, + { IPv4(200,30,208,0),24 }, + { IPv4(200,30,209,0),24 }, + { IPv4(200,30,210,0),24 }, + { IPv4(200,30,211,0),24 }, + { IPv4(200,30,212,0),24 }, + { IPv4(200,30,213,0),24 }, + { IPv4(200,30,214,0),24 }, + { IPv4(200,30,215,0),24 }, + { IPv4(200,30,216,0),24 }, + { IPv4(200,30,217,0),24 }, + { IPv4(200,30,218,0),24 }, + { IPv4(200,30,219,0),24 }, + { IPv4(200,30,220,0),24 }, + { IPv4(200,30,221,0),24 }, + { IPv4(200,30,222,0),24 }, + { IPv4(200,30,223,0),24 }, + { IPv4(200,30,224,0),24 }, + { IPv4(200,30,225,0),24 }, + { IPv4(200,30,226,0),24 }, + { IPv4(200,30,227,0),24 }, + { IPv4(200,30,228,0),24 }, + { IPv4(200,30,229,0),24 }, + { IPv4(200,30,230,0),24 }, + { IPv4(200,30,231,0),24 }, + { IPv4(200,30,232,0),24 }, + { IPv4(200,30,233,0),24 }, + { IPv4(200,30,234,0),24 }, + { IPv4(200,30,235,0),24 }, + { IPv4(200,30,236,0),24 }, + { IPv4(200,30,237,0),24 }, + { IPv4(200,30,238,0),24 }, + { IPv4(200,30,239,0),24 }, + { IPv4(200,30,240,0),24 }, + { IPv4(200,30,241,0),24 }, + { IPv4(200,30,242,0),24 }, + { IPv4(200,30,243,0),24 }, + { IPv4(200,30,244,0),24 }, + { IPv4(200,30,245,0),24 }, + { IPv4(200,30,246,0),24 }, + { IPv4(200,30,247,0),24 }, + { IPv4(200,30,248,0),24 }, + { IPv4(200,30,249,0),24 }, + { IPv4(200,30,250,0),24 }, + { IPv4(200,30,251,0),24 }, + { IPv4(200,30,252,0),24 }, + { IPv4(200,30,253,0),24 }, + { IPv4(200,30,254,0),24 }, + { IPv4(200,30,255,0),24 }, + { IPv4(200,32,0,0),19 }, + { IPv4(200,32,0,0),21 }, + { IPv4(200,32,8,0),21 }, + { IPv4(200,32,16,0),21 }, + { IPv4(200,32,24,0),21 }, + { IPv4(200,32,32,0),19 }, + { IPv4(200,33,1,0),24 }, + { IPv4(200,33,3,0),24 }, + { IPv4(200,33,7,0),24 }, + { IPv4(200,33,8,0),24 }, + { IPv4(200,33,15,0),24 }, + { IPv4(200,33,16,0),24 }, + { IPv4(200,33,20,0),24 }, + { IPv4(200,33,21,0),24 }, + { IPv4(200,33,25,0),24 }, + { IPv4(200,33,30,0),24 }, + { IPv4(200,33,31,0),24 }, + { IPv4(200,33,32,0),24 }, + { IPv4(200,33,34,0),24 }, + { IPv4(200,33,36,0),24 }, + { IPv4(200,33,53,0),24 }, + { IPv4(200,33,56,0),24 }, + { IPv4(200,33,60,0),24 }, + { IPv4(200,33,61,0),24 }, + { IPv4(200,33,62,0),23 }, + { IPv4(200,33,68,0),24 }, + { IPv4(200,33,71,0),24 }, + { IPv4(200,33,72,0),24 }, + { IPv4(200,33,74,0),24 }, + { IPv4(200,33,79,0),24 }, + { IPv4(200,33,97,0),24 }, + { IPv4(200,33,99,0),24 }, + { IPv4(200,33,100,0),22 }, + { IPv4(200,33,104,0),23 }, + { IPv4(200,33,111,0),24 }, + { IPv4(200,33,112,0),24 }, + { IPv4(200,33,116,0),24 }, + { IPv4(200,33,117,0),24 }, + { IPv4(200,33,121,0),24 }, + { IPv4(200,33,122,0),24 }, + { IPv4(200,33,136,0),21 }, + { IPv4(200,33,137,0),24 }, + { IPv4(200,33,140,0),24 }, + { IPv4(200,33,142,0),24 }, + { IPv4(200,33,143,0),24 }, + { IPv4(200,33,144,0),21 }, + { IPv4(200,33,151,0),24 }, + 
{ IPv4(200,33,164,0),22 }, + { IPv4(200,33,169,0),24 }, + { IPv4(200,33,170,0),24 }, + { IPv4(200,33,181,0),24 }, + { IPv4(200,33,188,0),24 }, + { IPv4(200,33,189,0),24 }, + { IPv4(200,33,190,0),24 }, + { IPv4(200,33,191,0),24 }, + { IPv4(200,33,206,0),24 }, + { IPv4(200,33,240,0),22 }, + { IPv4(200,33,244,0),24 }, + { IPv4(200,33,245,0),24 }, + { IPv4(200,33,252,0),24 }, + { IPv4(200,34,32,0),20 }, + { IPv4(200,34,48,0),21 }, + { IPv4(200,34,56,0),22 }, + { IPv4(200,34,96,0),24 }, + { IPv4(200,34,97,0),24 }, + { IPv4(200,34,98,0),24 }, + { IPv4(200,34,99,0),24 }, + { IPv4(200,34,100,0),24 }, + { IPv4(200,34,101,0),24 }, + { IPv4(200,34,102,0),24 }, + { IPv4(200,34,103,0),24 }, + { IPv4(200,34,104,0),24 }, + { IPv4(200,34,105,0),24 }, + { IPv4(200,34,106,0),24 }, + { IPv4(200,34,107,0),24 }, + { IPv4(200,34,108,0),24 }, + { IPv4(200,34,109,0),24 }, + { IPv4(200,34,110,0),24 }, + { IPv4(200,34,112,0),20 }, + { IPv4(200,34,128,0),24 }, + { IPv4(200,34,139,0),24 }, + { IPv4(200,34,140,0),24 }, + { IPv4(200,34,142,0),24 }, + { IPv4(200,34,149,0),24 }, + { IPv4(200,34,150,0),24 }, + { IPv4(200,34,152,0),24 }, + { IPv4(200,34,153,0),24 }, + { IPv4(200,34,154,0),24 }, + { IPv4(200,34,155,0),24 }, + { IPv4(200,34,157,0),24 }, + { IPv4(200,34,159,0),24 }, + { IPv4(200,34,160,0),22 }, + { IPv4(200,34,164,0),22 }, + { IPv4(200,34,168,0),24 }, + { IPv4(200,34,169,0),24 }, + { IPv4(200,34,171,0),24 }, + { IPv4(200,34,175,0),24 }, + { IPv4(200,34,176,0),24 }, + { IPv4(200,34,177,0),24 }, + { IPv4(200,34,178,0),24 }, + { IPv4(200,34,179,0),24 }, + { IPv4(200,34,181,0),24 }, + { IPv4(200,34,182,0),24 }, + { IPv4(200,34,183,0),24 }, + { IPv4(200,34,185,0),24 }, + { IPv4(200,34,186,0),24 }, + { IPv4(200,34,187,0),24 }, + { IPv4(200,34,188,0),24 }, + { IPv4(200,34,189,0),24 }, + { IPv4(200,34,190,0),24 }, + { IPv4(200,34,191,0),24 }, + { IPv4(200,34,201,0),24 }, + { IPv4(200,34,204,0),24 }, + { IPv4(200,34,205,0),24 }, + { IPv4(200,34,206,0),24 }, + { IPv4(200,34,221,0),24 }, + { IPv4(200,34,222,0),24 }, + { IPv4(200,34,223,0),24 }, + { IPv4(200,36,0,0),19 }, + { IPv4(200,36,12,0),24 }, + { IPv4(200,36,27,0),24 }, + { IPv4(200,36,28,0),22 }, + { IPv4(200,36,32,0),19 }, + { IPv4(200,36,64,0),19 }, + { IPv4(200,36,128,0),19 }, + { IPv4(200,36,176,0),20 }, + { IPv4(200,36,224,0),20 }, + { IPv4(200,36,229,0),24 }, + { IPv4(200,36,240,0),21 }, + { IPv4(200,36,248,0),21 }, + { IPv4(200,38,0,0),19 }, + { IPv4(200,38,32,0),19 }, + { IPv4(200,38,96,0),20 }, + { IPv4(200,38,112,0),21 }, + { IPv4(200,38,120,0),24 }, + { IPv4(200,38,121,0),24 }, + { IPv4(200,38,122,0),24 }, + { IPv4(200,38,126,0),24 }, + { IPv4(200,38,128,0),19 }, + { IPv4(200,38,152,0),24 }, + { IPv4(200,38,192,0),19 }, + { IPv4(200,38,215,0),24 }, + { IPv4(200,38,241,0),24 }, + { IPv4(200,39,0,0),20 }, + { IPv4(200,39,16,0),20 }, + { IPv4(200,39,32,0),19 }, + { IPv4(200,39,64,0),19 }, + { IPv4(200,39,160,0),19 }, + { IPv4(200,39,192,0),24 }, + { IPv4(200,39,212,0),22 }, + { IPv4(200,39,216,0),23 }, + { IPv4(200,39,219,0),24 }, + { IPv4(200,41,9,0),24 }, + { IPv4(200,41,54,0),23 }, + { IPv4(200,41,94,0),23 }, + { IPv4(200,41,129,0),24 }, + { IPv4(200,41,146,0),23 }, + { IPv4(200,41,176,0),24 }, + { IPv4(200,41,199,0),24 }, + { IPv4(200,41,210,0),23 }, + { IPv4(200,41,224,0),20 }, + { IPv4(200,41,246,0),23 }, + { IPv4(200,42,0,0),19 }, + { IPv4(200,42,32,0),24 }, + { IPv4(200,42,33,0),24 }, + { IPv4(200,42,34,0),24 }, + { IPv4(200,42,64,0),19 }, + { IPv4(200,42,96,0),21 }, + { IPv4(200,42,96,0),19 }, + { IPv4(200,42,112,0),21 }, + { 
IPv4(200,42,128,0),19 }, + { IPv4(200,42,144,0),21 }, + { IPv4(200,47,152,0),24 }, + { IPv4(200,47,154,0),24 }, + { IPv4(200,47,157,0),24 }, + { IPv4(200,47,159,0),24 }, + { IPv4(200,47,177,0),24 }, + { IPv4(200,47,179,0),24 }, + { IPv4(200,47,184,0),24 }, + { IPv4(200,47,185,0),24 }, + { IPv4(200,47,186,0),24 }, + { IPv4(200,47,187,0),24 }, + { IPv4(200,47,188,0),24 }, + { IPv4(200,49,0,0),24 }, + { IPv4(200,49,90,0),24 }, + { IPv4(200,50,67,0),24 }, + { IPv4(200,51,0,0),23 }, + { IPv4(200,51,14,0),24 }, + { IPv4(200,51,19,0),24 }, + { IPv4(200,51,27,0),24 }, + { IPv4(200,51,40,0),21 }, + { IPv4(200,51,58,0),24 }, + { IPv4(200,51,64,0),24 }, + { IPv4(200,51,65,0),24 }, + { IPv4(200,51,70,0),24 }, + { IPv4(200,51,80,0),20 }, + { IPv4(200,51,96,0),21 }, + { IPv4(200,51,144,0),22 }, + { IPv4(200,51,148,0),22 }, + { IPv4(200,51,152,0),22 }, + { IPv4(200,51,167,0),24 }, + { IPv4(200,51,170,0),24 }, + { IPv4(200,51,188,0),23 }, + { IPv4(200,51,192,0),20 }, + { IPv4(200,51,238,0),24 }, + { IPv4(200,51,242,0),23 }, + { IPv4(200,51,252,0),22 }, + { IPv4(200,52,16,0),21 }, + { IPv4(200,52,24,0),21 }, + { IPv4(200,52,103,0),24 }, + { IPv4(200,52,112,0),24 }, + { IPv4(200,52,112,0),20 }, + { IPv4(200,52,114,0),24 }, + { IPv4(200,52,115,0),24 }, + { IPv4(200,52,116,0),24 }, + { IPv4(200,52,117,0),24 }, + { IPv4(200,52,118,0),24 }, + { IPv4(200,52,119,0),24 }, + { IPv4(200,52,120,0),24 }, + { IPv4(200,52,121,0),24 }, + { IPv4(200,52,122,0),24 }, + { IPv4(200,52,123,0),24 }, + { IPv4(200,52,124,0),24 }, + { IPv4(200,52,125,0),24 }, + { IPv4(200,52,126,0),24 }, + { IPv4(200,52,127,0),24 }, + { IPv4(200,52,144,0),20 }, + { IPv4(200,52,161,0),24 }, + { IPv4(200,52,163,0),24 }, + { IPv4(200,52,164,0),24 }, + { IPv4(200,52,165,0),24 }, + { IPv4(200,52,166,0),24 }, + { IPv4(200,52,174,0),24 }, + { IPv4(200,52,175,0),24 }, + { IPv4(200,52,190,0),24 }, + { IPv4(200,52,191,0),24 }, + { IPv4(200,52,240,0),20 }, + { IPv4(200,53,224,0),20 }, + { IPv4(200,55,42,0),24 }, + { IPv4(200,56,64,0),20 }, + { IPv4(200,56,80,0),20 }, + { IPv4(200,56,85,0),24 }, + { IPv4(200,56,90,0),24 }, + { IPv4(200,56,112,0),24 }, + { IPv4(200,56,112,0),20 }, + { IPv4(200,56,113,0),24 }, + { IPv4(200,56,114,0),24 }, + { IPv4(200,56,115,0),24 }, + { IPv4(200,56,116,0),24 }, + { IPv4(200,56,123,0),24 }, + { IPv4(200,56,124,0),24 }, + { IPv4(200,56,126,0),24 }, + { IPv4(200,56,127,0),24 }, + { IPv4(200,56,192,0),24 }, + { IPv4(200,56,193,0),24 }, + { IPv4(200,56,194,0),24 }, + { IPv4(200,56,195,0),24 }, + { IPv4(200,56,196,0),24 }, + { IPv4(200,56,197,0),24 }, + { IPv4(200,56,198,0),24 }, + { IPv4(200,56,199,0),24 }, + { IPv4(200,56,200,0),24 }, + { IPv4(200,56,224,0),20 }, + { IPv4(200,57,32,0),20 }, + { IPv4(200,57,48,0),20 }, + { IPv4(200,57,80,0),20 }, + { IPv4(200,57,128,0),20 }, + { IPv4(200,57,144,0),20 }, + { IPv4(200,61,32,0),20 }, + { IPv4(200,61,32,0),23 }, + { IPv4(200,61,34,0),23 }, + { IPv4(200,61,36,0),23 }, + { IPv4(200,61,38,0),23 }, + { IPv4(200,61,38,0),24 }, + { IPv4(200,61,40,0),24 }, + { IPv4(200,61,40,0),23 }, + { IPv4(200,61,42,0),23 }, + { IPv4(200,61,44,0),23 }, + { IPv4(200,61,46,0),23 }, + { IPv4(200,61,48,0),23 }, + { IPv4(200,61,50,0),23 }, + { IPv4(200,61,52,0),23 }, + { IPv4(200,61,54,0),23 }, + { IPv4(200,61,56,0),23 }, + { IPv4(200,61,58,0),23 }, + { IPv4(200,61,60,0),23 }, + { IPv4(200,61,62,0),23 }, + { IPv4(200,61,64,0),24 }, + { IPv4(200,61,128,0),19 }, + { IPv4(200,62,7,0),24 }, + { IPv4(200,62,16,0),20 }, + { IPv4(200,62,128,0),21 }, + { IPv4(200,62,128,0),24 }, + { IPv4(200,62,130,0),24 }, + { 
IPv4(200,62,136,0),21 }, + { IPv4(200,62,144,0),21 }, + { IPv4(200,62,192,0),21 }, + { IPv4(200,62,200,0),21 }, + { IPv4(200,64,0,0),16 }, + { IPv4(200,64,0,0),18 }, + { IPv4(200,64,18,0),24 }, + { IPv4(200,64,64,0),18 }, + { IPv4(200,64,128,0),19 }, + { IPv4(200,64,160,0),19 }, + { IPv4(200,64,192,0),19 }, + { IPv4(200,64,224,0),19 }, + { IPv4(200,65,0,0),18 }, + { IPv4(200,65,0,0),16 }, + { IPv4(200,65,7,0),24 }, + { IPv4(200,65,8,0),24 }, + { IPv4(200,65,22,0),24 }, + { IPv4(200,65,24,0),24 }, + { IPv4(200,65,25,0),24 }, + { IPv4(200,65,32,0),24 }, + { IPv4(200,65,64,0),18 }, + { IPv4(200,65,113,0),24 }, + { IPv4(200,65,114,0),24 }, + { IPv4(200,65,128,0),19 }, + { IPv4(200,65,160,0),19 }, + { IPv4(200,65,175,0),24 }, + { IPv4(200,65,192,0),19 }, + { IPv4(200,65,224,0),19 }, + { IPv4(200,66,112,0),24 }, + { IPv4(200,66,112,0),20 }, + { IPv4(200,66,113,0),24 }, + { IPv4(200,66,114,0),24 }, + { IPv4(200,66,115,0),24 }, + { IPv4(200,66,119,0),24 }, + { IPv4(200,66,123,0),24 }, + { IPv4(200,66,125,0),24 }, + { IPv4(200,66,128,0),19 }, + { IPv4(200,66,128,0),17 }, + { IPv4(200,66,160,0),19 }, + { IPv4(200,66,192,0),20 }, + { IPv4(200,66,208,0),20 }, + { IPv4(200,66,224,0),20 }, + { IPv4(200,66,240,0),20 }, + { IPv4(200,67,0,0),16 }, + { IPv4(200,67,0,0),17 }, + { IPv4(200,67,128,0),17 }, + { IPv4(200,69,0,0),22 }, + { IPv4(200,69,4,0),22 }, + { IPv4(200,69,8,0),22 }, + { IPv4(200,69,12,0),22 }, + { IPv4(200,74,1,0),24 }, + { IPv4(200,74,29,0),24 }, + { IPv4(200,74,30,0),24 }, + { IPv4(200,74,31,0),24 }, + { IPv4(200,74,137,0),24 }, + { IPv4(200,74,160,0),24 }, + { IPv4(200,74,161,0),24 }, + { IPv4(200,74,162,0),24 }, + { IPv4(200,74,163,0),24 }, + { IPv4(200,74,164,0),24 }, + { IPv4(200,74,165,0),24 }, + { IPv4(200,74,166,0),24 }, + { IPv4(200,74,167,0),24 }, + { IPv4(200,74,168,0),24 }, + { IPv4(200,74,169,0),24 }, + { IPv4(200,74,170,0),24 }, + { IPv4(200,74,171,0),24 }, + { IPv4(200,74,172,0),24 }, + { IPv4(200,74,173,0),24 }, + { IPv4(200,74,174,0),24 }, + { IPv4(200,74,175,0),24 }, + { IPv4(200,76,192,0),20 }, + { IPv4(200,76,208,0),20 }, + { IPv4(200,80,128,0),23 }, + { IPv4(200,80,130,0),23 }, + { IPv4(200,80,132,0),23 }, + { IPv4(200,80,136,0),23 }, + { IPv4(200,155,0,0),21 }, + { IPv4(200,160,32,0),20 }, + { IPv4(200,169,80,0),20 }, + { IPv4(200,170,64,0),20 }, + { IPv4(200,173,0,0),16 }, + { IPv4(200,175,64,0),18 }, + { IPv4(200,175,128,0),18 }, + { IPv4(200,187,96,0),20 }, + { IPv4(200,187,112,0),22 }, + { IPv4(200,187,116,0),24 }, + { IPv4(200,187,160,0),20 }, + { IPv4(200,189,0,0),22 }, + { IPv4(200,189,4,0),22 }, + { IPv4(200,189,160,0),19 }, + { IPv4(200,190,0,0),16 }, + { IPv4(200,192,0,0),18 }, + { IPv4(200,192,80,0),20 }, + { IPv4(200,192,128,0),20 }, + { IPv4(200,192,160,0),20 }, + { IPv4(200,192,224,0),21 }, + { IPv4(200,194,112,0),20 }, + { IPv4(200,195,128,0),19 }, + { IPv4(200,195,160,0),20 }, + { IPv4(200,195,176,0),20 }, + { IPv4(200,195,192,0),20 }, + { IPv4(200,195,208,0),20 }, + { IPv4(200,198,176,0),20 }, + { IPv4(200,198,184,0),23 }, + { IPv4(200,198,188,0),24 }, + { IPv4(200,201,192,0),18 }, + { IPv4(200,202,5,0),24 }, + { IPv4(200,202,6,0),24 }, + { IPv4(200,202,7,0),24 }, + { IPv4(200,202,13,0),24 }, + { IPv4(200,202,14,0),24 }, + { IPv4(200,218,0,0),19 }, + { IPv4(200,225,80,0),20 }, + { IPv4(200,225,144,0),20 }, + { IPv4(200,225,192,0),21 }, + { IPv4(200,225,192,0),18 }, + { IPv4(200,225,200,0),21 }, + { IPv4(200,225,208,0),21 }, + { IPv4(200,225,216,0),21 }, + { IPv4(200,225,224,0),21 }, + { IPv4(200,225,232,0),21 }, + { IPv4(200,225,240,0),21 }, + { 
IPv4(200,225,248,0),21 }, + { IPv4(200,226,0,0),19 }, + { IPv4(200,226,64,0),20 }, + { IPv4(200,226,80,0),20 }, + { IPv4(200,226,96,0),20 }, + { IPv4(200,226,128,0),17 }, + { IPv4(200,229,0,0),20 }, + { IPv4(200,229,16,0),20 }, + { IPv4(200,229,32,0),20 }, + { IPv4(200,229,112,0),24 }, + { IPv4(200,229,113,0),24 }, + { IPv4(200,229,114,0),24 }, + { IPv4(200,229,115,0),24 }, + { IPv4(200,229,117,0),24 }, + { IPv4(200,229,118,0),24 }, + { IPv4(200,229,120,0),24 }, + { IPv4(200,229,123,0),24 }, + { IPv4(200,229,128,0),20 }, + { IPv4(200,229,224,0),20 }, + { IPv4(200,229,240,0),21 }, + { IPv4(201,115,100,0),24 }, + { IPv4(202,0,16,0),20 }, + { IPv4(202,0,65,0),24 }, + { IPv4(202,0,71,0),24 }, + { IPv4(202,0,117,0),24 }, + { IPv4(202,0,118,0),24 }, + { IPv4(202,0,126,0),24 }, + { IPv4(202,0,127,0),24 }, + { IPv4(202,0,149,0),24 }, + { IPv4(202,0,155,0),24 }, + { IPv4(202,1,3,0),24 }, + { IPv4(202,1,6,0),24 }, + { IPv4(202,1,7,0),24 }, + { IPv4(202,1,224,0),21 }, + { IPv4(202,1,233,0),24 }, + { IPv4(202,2,8,0),21 }, + { IPv4(202,2,52,0),22 }, + { IPv4(202,3,8,0),22 }, + { IPv4(202,3,12,0),24 }, + { IPv4(202,4,0,0),21 }, + { IPv4(202,4,8,0),24 }, + { IPv4(202,4,185,0),24 }, + { IPv4(202,4,186,0),24 }, + { IPv4(202,4,187,0),24 }, + { IPv4(202,4,189,0),24 }, + { IPv4(202,4,252,0),22 }, + { IPv4(202,5,32,0),19 }, + { IPv4(202,5,64,0),20 }, + { IPv4(202,5,160,0),24 }, + { IPv4(202,5,166,0),24 }, + { IPv4(202,5,170,0),24 }, + { IPv4(202,5,172,0),24 }, + { IPv4(202,5,192,0),19 }, + { IPv4(202,6,100,0),23 }, + { IPv4(202,6,107,0),24 }, + { IPv4(202,6,124,0),22 }, + { IPv4(202,6,192,0),20 }, + { IPv4(202,7,34,0),24 }, + { IPv4(202,7,64,0),19 }, + { IPv4(202,7,80,0),21 }, + { IPv4(202,7,99,0),24 }, + { IPv4(202,7,100,0),24 }, + { IPv4(202,7,101,0),24 }, + { IPv4(202,7,102,0),24 }, + { IPv4(202,7,103,0),24 }, + { IPv4(202,7,168,0),24 }, + { IPv4(202,7,174,0),24 }, + { IPv4(202,7,179,0),24 }, + { IPv4(202,7,182,0),24 }, + { IPv4(202,7,187,0),24 }, + { IPv4(202,7,188,0),24 }, + { IPv4(202,7,189,0),24 }, + { IPv4(202,7,196,0),24 }, + { IPv4(202,7,198,0),24 }, + { IPv4(202,7,199,0),24 }, + { IPv4(202,7,215,0),24 }, + { IPv4(202,7,219,0),24 }, + { IPv4(202,8,1,0),24 }, + { IPv4(202,8,224,0),24 }, + { IPv4(202,8,225,0),24 }, + { IPv4(202,8,226,0),24 }, + { IPv4(202,8,227,0),24 }, + { IPv4(202,8,236,0),24 }, + { IPv4(202,8,237,0),24 }, + { IPv4(202,8,243,0),24 }, + { IPv4(202,8,245,0),24 }, + { IPv4(202,8,246,0),24 }, + { IPv4(202,8,247,0),24 }, + { IPv4(202,8,248,0),24 }, + { IPv4(202,8,249,0),24 }, + { IPv4(202,8,251,0),24 }, + { IPv4(202,9,64,0),19 }, + { IPv4(202,9,144,0),24 }, + { IPv4(202,9,147,0),24 }, + { IPv4(202,9,148,0),24 }, + { IPv4(202,9,149,0),24 }, + { IPv4(202,9,151,0),24 }, + { IPv4(202,9,160,0),22 }, + { IPv4(202,9,174,0),24 }, + { IPv4(202,9,176,0),24 }, + { IPv4(202,9,179,0),24 }, + { IPv4(202,9,180,0),24 }, + { IPv4(202,9,181,0),24 }, + { IPv4(202,9,183,0),24 }, + { IPv4(202,9,187,0),24 }, + { IPv4(202,9,188,0),22 }, + { IPv4(202,9,191,0),24 }, + { IPv4(202,9,255,0),24 }, + { IPv4(202,10,32,0),21 }, + { IPv4(202,11,160,0),22 }, + { IPv4(202,12,8,0),24 }, + { IPv4(202,12,19,0),24 }, + { IPv4(202,12,26,0),24 }, + { IPv4(202,12,27,0),24 }, + { IPv4(202,12,28,0),24 }, + { IPv4(202,12,62,0),24 }, + { IPv4(202,12,87,0),24 }, + { IPv4(202,12,94,0),23 }, + { IPv4(202,12,97,0),24 }, + { IPv4(202,12,112,0),24 }, + { IPv4(202,12,113,0),24 }, + { IPv4(202,13,236,0),22 }, + { IPv4(202,14,19,0),24 }, + { IPv4(202,14,20,0),22 }, + { IPv4(202,14,32,0),19 }, + { IPv4(202,14,82,0),24 }, + { 
IPv4(202,14,89,0),24 }, + { IPv4(202,14,95,0),24 }, + { IPv4(202,14,99,0),24 }, + { IPv4(202,14,102,0),24 }, + { IPv4(202,14,134,0),24 }, + { IPv4(202,14,141,0),24 }, + { IPv4(202,14,145,0),24 }, + { IPv4(202,14,164,0),24 }, + { IPv4(202,14,229,0),24 }, + { IPv4(202,15,64,0),19 }, + { IPv4(202,15,128,0),18 }, + { IPv4(202,16,104,0),24 }, + { IPv4(202,16,192,0),21 }, + { IPv4(202,16,225,0),24 }, + { IPv4(202,17,16,0),20 }, + { IPv4(202,17,128,0),19 }, + { IPv4(202,17,172,0),22 }, + { IPv4(202,17,180,0),24 }, + { IPv4(202,17,184,0),22 }, + { IPv4(202,17,192,0),23 }, + { IPv4(202,17,208,0),22 }, + { IPv4(202,17,212,0),22 }, + { IPv4(202,17,242,0),23 }, + { IPv4(202,17,242,0),24 }, + { IPv4(202,18,248,0),23 }, + { IPv4(202,18,250,0),24 }, + { IPv4(202,19,0,0),20 }, + { IPv4(202,19,32,0),24 }, + { IPv4(202,19,100,0),22 }, + { IPv4(202,19,112,0),24 }, + { IPv4(202,19,120,0),24 }, + { IPv4(202,19,125,0),24 }, + { IPv4(202,19,214,0),23 }, + { IPv4(202,19,237,0),24 }, + { IPv4(202,20,16,0),20 }, + { IPv4(202,20,64,0),24 }, + { IPv4(202,20,65,0),24 }, + { IPv4(202,20,67,0),24 }, + { IPv4(202,20,68,0),24 }, + { IPv4(202,20,81,0),24 }, + { IPv4(202,20,84,0),23 }, + { IPv4(202,20,92,0),24 }, + { IPv4(202,20,99,0),24 }, + { IPv4(202,20,105,0),24 }, + { IPv4(202,20,106,0),23 }, + { IPv4(202,20,119,0),24 }, + { IPv4(202,21,0,0),21 }, + { IPv4(202,21,8,0),21 }, + { IPv4(202,21,32,0),19 }, + { IPv4(202,21,140,0),24 }, + { IPv4(202,21,144,0),24 }, + { IPv4(202,21,149,0),24 }, + { IPv4(202,21,157,0),24 }, + { IPv4(202,22,8,0),21 }, + { IPv4(202,22,32,0),19 }, + { IPv4(202,22,163,0),24 }, + { IPv4(202,22,166,0),24 }, + { IPv4(202,22,167,0),24 }, + { IPv4(202,22,252,0),24 }, + { IPv4(202,22,255,0),24 }, + { IPv4(202,23,72,0),21 }, + { IPv4(202,23,88,0),24 }, + { IPv4(202,23,93,0),24 }, + { IPv4(202,23,124,0),24 }, + { IPv4(202,24,40,0),22 }, + { IPv4(202,24,192,0),24 }, + { IPv4(202,25,80,0),20 }, + { IPv4(202,25,99,0),24 }, + { IPv4(202,25,115,0),24 }, + { IPv4(202,25,116,0),22 }, + { IPv4(202,25,162,0),23 }, + { IPv4(202,25,192,0),20 }, + { IPv4(202,26,92,0),24 }, + { IPv4(202,26,94,0),23 }, + { IPv4(202,26,187,0),24 }, + { IPv4(202,26,240,0),21 }, + { IPv4(202,27,0,0),16 }, + { IPv4(202,27,16,0),20 }, + { IPv4(202,27,17,0),24 }, + { IPv4(202,27,48,0),21 }, + { IPv4(202,27,56,0),22 }, + { IPv4(202,27,76,0),24 }, + { IPv4(202,27,77,0),24 }, + { IPv4(202,27,83,0),24 }, + { IPv4(202,27,100,0),22 }, + { IPv4(202,27,110,0),24 }, + { IPv4(202,27,140,0),22 }, + { IPv4(202,27,156,0),22 }, + { IPv4(202,27,184,0),23 }, + { IPv4(202,27,192,0),21 }, + { IPv4(202,27,200,0),22 }, + { IPv4(202,27,204,0),24 }, + { IPv4(202,27,209,0),24 }, + { IPv4(202,27,210,0),24 }, + { IPv4(202,27,211,0),24 }, + { IPv4(202,27,212,0),22 }, + { IPv4(202,27,216,0),22 }, + { IPv4(202,27,217,0),24 }, + { IPv4(202,27,222,0),24 }, + { IPv4(202,27,236,0),24 }, + { IPv4(202,27,247,0),24 }, + { IPv4(202,27,250,0),24 }, + { IPv4(202,27,251,0),24 }, + { IPv4(202,28,17,0),24 }, + { IPv4(202,28,24,0),22 }, + { IPv4(202,28,24,0),24 }, + { IPv4(202,28,25,0),24 }, + { IPv4(202,28,26,0),24 }, + { IPv4(202,28,27,0),24 }, + { IPv4(202,28,32,0),22 }, + { IPv4(202,28,68,0),24 }, + { IPv4(202,28,69,0),24 }, + { IPv4(202,28,70,0),24 }, + { IPv4(202,28,71,0),24 }, + { IPv4(202,28,92,0),24 }, + { IPv4(202,28,92,0),22 }, + { IPv4(202,28,116,0),22 }, + { IPv4(202,28,128,0),21 }, + { IPv4(202,28,136,0),21 }, + { IPv4(202,28,144,0),21 }, + { IPv4(202,28,152,0),21 }, + { IPv4(202,28,160,0),21 }, + { IPv4(202,28,168,0),21 }, + { IPv4(202,28,176,0),21 }, + { 
IPv4(202,28,184,0),21 }, + { IPv4(202,30,0,0),19 }, + { IPv4(202,30,14,0),23 }, + { IPv4(202,30,32,0),23 }, + { IPv4(202,30,34,0),24 }, + { IPv4(202,30,35,0),24 }, + { IPv4(202,30,36,0),23 }, + { IPv4(202,30,38,0),24 }, + { IPv4(202,30,40,0),22 }, + { IPv4(202,30,44,0),24 }, + { IPv4(202,30,46,0),23 }, + { IPv4(202,30,49,0),24 }, + { IPv4(202,30,50,0),23 }, + { IPv4(202,30,52,0),23 }, + { IPv4(202,30,54,0),24 }, + { IPv4(202,30,55,0),24 }, + { IPv4(202,30,56,0),23 }, + { IPv4(202,30,58,0),24 }, + { IPv4(202,30,60,0),22 }, + { IPv4(202,30,64,0),19 }, + { IPv4(202,30,89,0),24 }, + { IPv4(202,30,94,0),24 }, + { IPv4(202,30,96,0),20 }, + { IPv4(202,30,112,0),21 }, + { IPv4(202,30,128,0),17 }, + { IPv4(202,30,128,0),18 }, + { IPv4(202,30,183,0),24 }, + { IPv4(202,30,184,0),24 }, + { IPv4(202,30,190,0),24 }, + { IPv4(202,30,192,0),24 }, + { IPv4(202,30,192,0),18 }, + { IPv4(202,30,192,0),19 }, + { IPv4(202,30,193,0),24 }, + { IPv4(202,30,201,0),24 }, + { IPv4(202,30,224,0),19 }, + { IPv4(202,30,224,0),24 }, + { IPv4(202,30,231,0),24 }, + { IPv4(202,30,232,0),24 }, + { IPv4(202,30,233,0),24 }, + { IPv4(202,30,234,0),24 }, + { IPv4(202,30,235,0),24 }, + { IPv4(202,30,236,0),24 }, + { IPv4(202,30,237,0),24 }, + { IPv4(202,30,238,0),24 }, + { IPv4(202,31,23,0),24 }, + { IPv4(202,31,24,0),22 }, + { IPv4(202,31,29,0),24 }, + { IPv4(202,31,30,0),23 }, + { IPv4(202,31,32,0),20 }, + { IPv4(202,31,48,0),22 }, + { IPv4(202,31,52,0),23 }, + { IPv4(202,31,54,0),24 }, + { IPv4(202,31,56,0),21 }, + { IPv4(202,31,64,0),21 }, + { IPv4(202,31,72,0),23 }, + { IPv4(202,31,75,0),24 }, + { IPv4(202,31,76,0),22 }, + { IPv4(202,31,80,0),21 }, + { IPv4(202,31,88,0),22 }, + { IPv4(202,31,92,0),23 }, + { IPv4(202,31,128,0),20 }, + { IPv4(202,31,144,0),21 }, + { IPv4(202,31,152,0),24 }, + { IPv4(202,31,153,0),24 }, + { IPv4(202,31,154,0),24 }, + { IPv4(202,31,156,0),22 }, + { IPv4(202,31,160,0),20 }, + { IPv4(202,31,180,0),24 }, + { IPv4(202,31,181,0),24 }, + { IPv4(202,31,184,0),21 }, + { IPv4(202,31,192,0),20 }, + { IPv4(202,31,208,0),22 }, + { IPv4(202,31,222,0),24 }, + { IPv4(202,31,224,0),19 }, + { IPv4(202,33,0,0),16 }, + { IPv4(202,34,32,0),24 }, + { IPv4(202,35,72,0),22 }, + { IPv4(202,35,230,0),24 }, + { IPv4(202,36,0,0),16 }, + { IPv4(202,36,43,0),24 }, + { IPv4(202,36,45,0),24 }, + { IPv4(202,36,46,0),24 }, + { IPv4(202,36,75,0),24 }, + { IPv4(202,36,80,0),24 }, + { IPv4(202,36,114,0),24 }, + { IPv4(202,36,121,0),24 }, + { IPv4(202,36,147,0),24 }, + { IPv4(202,36,148,0),24 }, + { IPv4(202,36,154,0),24 }, + { IPv4(202,36,164,0),22 }, + { IPv4(202,36,164,0),23 }, + { IPv4(202,36,166,0),23 }, + { IPv4(202,36,174,0),24 }, + { IPv4(202,36,195,0),24 }, + { IPv4(202,36,202,0),24 }, + { IPv4(202,36,204,0),23 }, + { IPv4(202,36,226,0),24 }, + { IPv4(202,36,227,0),24 }, + { IPv4(202,36,235,0),24 }, + { IPv4(202,37,0,0),16 }, + { IPv4(202,37,0,0),20 }, + { IPv4(202,37,64,0),23 }, + { IPv4(202,37,70,0),24 }, + { IPv4(202,37,71,0),24 }, + { IPv4(202,37,75,0),24 }, + { IPv4(202,37,86,0),23 }, + { IPv4(202,37,88,0),24 }, + { IPv4(202,37,93,0),24 }, + { IPv4(202,37,106,0),24 }, + { IPv4(202,37,107,0),24 }, + { IPv4(202,37,117,0),24 }, + { IPv4(202,37,118,0),24 }, + { IPv4(202,37,120,0),24 }, + { IPv4(202,37,124,0),23 }, + { IPv4(202,37,127,0),24 }, + { IPv4(202,37,129,0),24 }, + { IPv4(202,37,160,0),24 }, + { IPv4(202,37,168,0),24 }, + { IPv4(202,37,220,0),24 }, + { IPv4(202,37,240,0),23 }, + { IPv4(202,37,254,0),24 }, + { IPv4(202,38,8,0),21 }, + { IPv4(202,38,45,0),24 }, + { IPv4(202,38,132,0),22 }, + { IPv4(202,38,161,0),24 
}, + { IPv4(202,38,164,0),22 }, + { IPv4(202,39,0,0),18 }, + { IPv4(202,39,64,0),20 }, + { IPv4(202,39,112,0),20 }, + { IPv4(202,39,128,0),17 }, + { IPv4(202,40,16,0),20 }, + { IPv4(202,40,224,0),21 }, + { IPv4(202,40,224,0),19 }, + { IPv4(202,41,106,0),24 }, + { IPv4(202,43,64,0),19 }, + { IPv4(202,43,96,0),19 }, + { IPv4(202,43,248,0),21 }, + { IPv4(202,44,8,0),21 }, + { IPv4(202,44,64,0),24 }, + { IPv4(202,44,68,0),22 }, + { IPv4(202,44,140,0),23 }, + { IPv4(202,44,142,0),23 }, + { IPv4(202,44,144,0),24 }, + { IPv4(202,44,148,0),22 }, + { IPv4(202,44,192,0),18 }, + { IPv4(202,44,216,0),24 }, + { IPv4(202,46,0,0),20 }, + { IPv4(202,46,24,0),22 }, + { IPv4(202,46,28,0),24 }, + { IPv4(202,46,31,0),24 }, + { IPv4(202,46,64,0),20 }, + { IPv4(202,46,80,0),22 }, + { IPv4(202,46,84,0),24 }, + { IPv4(202,46,108,0),22 }, + { IPv4(202,46,130,0),23 }, + { IPv4(202,46,240,0),20 }, + { IPv4(202,47,1,0),24 }, + { IPv4(202,47,56,0),24 }, + { IPv4(202,47,64,0),20 }, + { IPv4(202,47,125,0),24 }, + { IPv4(202,47,132,0),23 }, + { IPv4(202,47,140,0),24 }, + { IPv4(202,47,160,0),19 }, + { IPv4(202,47,224,0),20 }, + { IPv4(202,47,240,0),21 }, + { IPv4(202,47,248,0),23 }, + { IPv4(202,47,250,0),24 }, + { IPv4(202,47,251,0),24 }, + { IPv4(202,47,252,0),24 }, + { IPv4(202,47,252,0),23 }, + { IPv4(202,47,253,0),24 }, + { IPv4(202,47,254,0),24 }, + { IPv4(202,48,8,0),21 }, + { IPv4(202,48,48,0),20 }, + { IPv4(202,48,96,0),22 }, + { IPv4(202,48,106,0),23 }, + { IPv4(202,48,160,0),22 }, + { IPv4(202,48,192,0),24 }, + { IPv4(202,48,208,0),24 }, + { IPv4(202,49,0,0),21 }, + { IPv4(202,49,16,0),20 }, + { IPv4(202,49,62,0),24 }, + { IPv4(202,49,64,0),21 }, + { IPv4(202,49,80,0),23 }, + { IPv4(202,49,84,0),24 }, + { IPv4(202,49,141,0),24 }, + { IPv4(202,49,152,0),21 }, + { IPv4(202,49,172,0),22 }, + { IPv4(202,49,183,0),24 }, + { IPv4(202,49,189,0),24 }, + { IPv4(202,49,224,0),20 }, + { IPv4(202,49,233,0),24 }, + { IPv4(202,49,250,0),24 }, + { IPv4(202,50,0,0),16 }, + { IPv4(202,50,49,0),24 }, + { IPv4(202,50,52,0),22 }, + { IPv4(202,50,60,0),22 }, + { IPv4(202,50,64,0),21 }, + { IPv4(202,50,72,0),24 }, + { IPv4(202,50,94,0),24 }, + { IPv4(202,50,95,0),24 }, + { IPv4(202,50,112,0),24 }, + { IPv4(202,50,137,0),24 }, + { IPv4(202,50,143,0),24 }, + { IPv4(202,50,164,0),24 }, + { IPv4(202,50,170,0),24 }, + { IPv4(202,50,177,0),24 }, + { IPv4(202,50,196,0),22 }, + { IPv4(202,50,200,0),21 }, + { IPv4(202,50,208,0),20 }, + { IPv4(202,51,64,0),24 }, + { IPv4(202,51,65,0),24 }, + { IPv4(202,51,71,0),24 }, + { IPv4(202,51,75,0),24 }, + { IPv4(202,51,76,0),24 }, + { IPv4(202,51,77,0),24 }, + { IPv4(202,51,88,0),24 }, + { IPv4(202,51,93,0),24 }, + { IPv4(202,51,94,0),24 }, + { IPv4(202,51,95,0),24 }, + { IPv4(202,51,96,0),19 }, + { IPv4(202,51,136,0),24 }, + { IPv4(202,51,137,0),24 }, + { IPv4(202,51,138,0),24 }, + { IPv4(202,51,151,0),24 }, + { IPv4(202,51,159,0),24 }, + { IPv4(202,51,192,0),20 }, + { IPv4(202,51,192,0),19 }, + { IPv4(202,51,208,0),20 }, + { IPv4(202,52,32,0),22 }, + { IPv4(202,52,64,0),18 }, + { IPv4(202,52,128,0),19 }, + { IPv4(202,52,224,0),19 }, + { IPv4(202,53,90,0),23 }, + { IPv4(202,53,96,0),20 }, + { IPv4(202,53,224,0),22 }, + { IPv4(202,53,226,0),23 }, + { IPv4(202,53,228,0),23 }, + { IPv4(202,53,230,0),24 }, + { IPv4(202,53,232,0),22 }, + { IPv4(202,53,240,0),21 }, + { IPv4(202,53,248,0),21 }, + { IPv4(202,55,143,0),24 }, + { IPv4(202,56,32,0),23 }, + { IPv4(202,56,48,0),20 }, + { IPv4(202,56,152,0),24 }, + { IPv4(202,56,153,0),24 }, + { IPv4(202,56,156,0),23 }, + { IPv4(202,56,158,0),23 }, + { 
IPv4(202,56,158,0),24 }, + { IPv4(202,56,159,0),24 }, + { IPv4(202,56,192,0),24 }, + { IPv4(202,56,193,0),24 }, + { IPv4(202,56,194,0),24 }, + { IPv4(202,56,195,0),24 }, + { IPv4(202,56,196,0),24 }, + { IPv4(202,56,196,0),23 }, + { IPv4(202,56,196,0),22 }, + { IPv4(202,56,197,0),24 }, + { IPv4(202,56,198,0),24 }, + { IPv4(202,56,198,0),23 }, + { IPv4(202,56,199,0),24 }, + { IPv4(202,56,200,0),22 }, + { IPv4(202,56,204,0),22 }, + { IPv4(202,56,204,0),24 }, + { IPv4(202,56,205,0),24 }, + { IPv4(202,56,206,0),24 }, + { IPv4(202,56,207,0),24 }, + { IPv4(202,56,212,0),22 }, + { IPv4(202,56,216,0),22 }, + { IPv4(202,56,220,0),22 }, + { IPv4(202,56,221,0),24 }, + { IPv4(202,56,222,0),24 }, + { IPv4(202,56,223,0),24 }, + { IPv4(202,56,224,0),22 }, + { IPv4(202,56,224,0),24 }, + { IPv4(202,56,224,0),20 }, + { IPv4(202,56,225,0),24 }, + { IPv4(202,56,229,0),24 }, + { IPv4(202,56,230,0),24 }, + { IPv4(202,56,231,0),24 }, + { IPv4(202,56,232,0),22 }, + { IPv4(202,56,236,0),22 }, + { IPv4(202,56,237,0),24 }, + { IPv4(202,56,239,0),24 }, + { IPv4(202,56,241,0),24 }, + { IPv4(202,56,245,0),24 }, + { IPv4(202,56,248,0),21 }, + { IPv4(202,56,248,0),24 }, + { IPv4(202,56,249,0),24 }, + { IPv4(202,56,250,0),24 }, + { IPv4(202,56,251,0),24 }, + { IPv4(202,56,252,0),24 }, + { IPv4(202,56,252,0),22 }, + { IPv4(202,56,253,0),24 }, + { IPv4(202,56,254,0),24 }, + { IPv4(202,56,255,0),24 }, + { IPv4(202,57,0,0),24 }, + { IPv4(202,57,1,0),24 }, + { IPv4(202,57,2,0),24 }, + { IPv4(202,57,3,0),24 }, + { IPv4(202,57,32,0),24 }, + { IPv4(202,57,33,0),24 }, + { IPv4(202,57,34,0),24 }, + { IPv4(202,57,35,0),24 }, + { IPv4(202,57,39,0),24 }, + { IPv4(202,57,49,0),24 }, + { IPv4(202,57,96,0),19 }, + { IPv4(202,57,128,0),18 }, + { IPv4(202,58,64,0),20 }, + { IPv4(202,58,96,0),19 }, + { IPv4(202,58,100,0),24 }, + { IPv4(202,58,104,0),24 }, + { IPv4(202,58,113,0),24 }, + { IPv4(202,58,115,0),24 }, + { IPv4(202,58,116,0),24 }, + { IPv4(202,58,117,0),24 }, + { IPv4(202,58,118,0),24 }, + { IPv4(202,58,122,0),24 }, + { IPv4(202,58,192,0),22 }, + { IPv4(202,58,196,0),23 }, + { IPv4(202,59,70,0),24 }, + { IPv4(202,59,71,0),24 }, + { IPv4(202,59,72,0),24 }, + { IPv4(202,59,73,0),24 }, + { IPv4(202,59,224,0),19 }, + { IPv4(202,60,192,0),24 }, + { IPv4(202,60,192,0),21 }, + { IPv4(202,61,64,0),19 }, + { IPv4(202,61,64,0),22 }, + { IPv4(202,61,68,0),22 }, + { IPv4(202,61,72,0),22 }, + { IPv4(202,61,77,0),24 }, + { IPv4(202,61,84,0),23 }, + { IPv4(202,61,199,0),24 }, + { IPv4(202,61,236,0),24 }, + { IPv4(202,61,236,0),22 }, + { IPv4(202,61,237,0),24 }, + { IPv4(202,61,238,0),24 }, + { IPv4(202,61,239,0),24 }, + { IPv4(202,62,68,0),22 }, + { IPv4(202,62,72,0),22 }, + { IPv4(202,62,83,0),24 }, + { IPv4(202,62,85,0),24 }, + { IPv4(202,62,94,0),24 }, + { IPv4(202,62,95,0),24 }, + { IPv4(202,62,128,0),19 }, + { IPv4(202,62,192,0),19 }, + { IPv4(202,63,192,0),19 }, + { IPv4(202,63,218,0),24 }, + { IPv4(202,63,219,0),24 }, + { IPv4(202,65,69,0),24 }, + { IPv4(202,66,24,0),24 }, + { IPv4(202,67,32,0),20 }, + { IPv4(202,68,143,0),24 }, + { IPv4(202,68,144,0),24 }, + { IPv4(202,68,147,0),24 }, + { IPv4(202,68,158,0),24 }, + { IPv4(202,70,32,0),21 }, + { IPv4(202,71,141,0),24 }, + { IPv4(202,71,150,0),24 }, + { IPv4(202,71,151,0),24 }, + { IPv4(202,71,152,0),24 }, + { IPv4(202,71,153,0),24 }, + { IPv4(202,71,154,0),24 }, + { IPv4(202,71,155,0),24 }, + { IPv4(202,71,158,0),24 }, + { IPv4(202,71,159,0),24 }, + { IPv4(202,72,32,0),20 }, + { IPv4(202,72,64,0),21 }, + { IPv4(202,72,64,0),24 }, + { IPv4(202,72,65,0),24 }, + { IPv4(202,72,66,0),24 }, + { 
IPv4(202,72,67,0),24 }, + { IPv4(202,72,68,0),24 }, + { IPv4(202,72,69,0),24 }, + { IPv4(202,72,70,0),24 }, + { IPv4(202,72,71,0),24 }, + { IPv4(202,72,72,0),24 }, + { IPv4(202,72,72,0),21 }, + { IPv4(202,72,73,0),24 }, + { IPv4(202,72,74,0),24 }, + { IPv4(202,72,75,0),24 }, + { IPv4(202,72,76,0),24 }, + { IPv4(202,72,77,0),24 }, + { IPv4(202,72,78,0),24 }, + { IPv4(202,72,79,0),24 }, + { IPv4(202,72,128,0),19 }, + { IPv4(202,74,32,0),19 }, + { IPv4(202,74,96,0),20 }, + { IPv4(202,74,102,0),24 }, + { IPv4(202,74,112,0),20 }, + { IPv4(202,75,32,0),20 }, + { IPv4(202,75,96,0),20 }, + { IPv4(202,75,128,0),18 }, + { IPv4(202,77,96,0),22 }, + { IPv4(202,77,100,0),22 }, + { IPv4(202,77,104,0),22 }, + { IPv4(202,77,108,0),22 }, + { IPv4(202,77,112,0),22 }, + { IPv4(202,77,116,0),24 }, + { IPv4(202,77,116,0),22 }, + { IPv4(202,77,117,0),24 }, + { IPv4(202,77,120,0),21 }, + { IPv4(202,78,128,0),21 }, + { IPv4(202,78,128,0),19 }, + { IPv4(202,78,137,0),24 }, + { IPv4(202,78,156,0),22 }, + { IPv4(202,78,156,0),24 }, + { IPv4(202,78,157,0),24 }, + { IPv4(202,78,158,0),24 }, + { IPv4(202,78,159,0),24 }, + { IPv4(202,79,32,0),24 }, + { IPv4(202,79,33,0),24 }, + { IPv4(202,79,34,0),24 }, + { IPv4(202,79,35,0),24 }, + { IPv4(202,79,36,0),24 }, + { IPv4(202,79,37,0),24 }, + { IPv4(202,79,38,0),24 }, + { IPv4(202,79,39,0),24 }, + { IPv4(202,79,40,0),24 }, + { IPv4(202,79,41,0),24 }, + { IPv4(202,79,42,0),24 }, + { IPv4(202,79,43,0),24 }, + { IPv4(202,79,44,0),24 }, + { IPv4(202,79,45,0),24 }, + { IPv4(202,79,46,0),24 }, + { IPv4(202,79,47,0),24 }, + { IPv4(202,79,48,0),24 }, + { IPv4(202,79,49,0),24 }, + { IPv4(202,79,50,0),24 }, + { IPv4(202,79,51,0),24 }, + { IPv4(202,79,52,0),24 }, + { IPv4(202,79,53,0),24 }, + { IPv4(202,79,54,0),24 }, + { IPv4(202,79,55,0),24 }, + { IPv4(202,79,56,0),24 }, + { IPv4(202,79,57,0),24 }, + { IPv4(202,79,58,0),24 }, + { IPv4(202,79,59,0),24 }, + { IPv4(202,79,128,0),19 }, + { IPv4(202,79,192,0),19 }, + { IPv4(202,80,224,0),19 }, + { IPv4(202,81,96,0),24 }, + { IPv4(202,81,97,0),24 }, + { IPv4(202,81,98,0),24 }, + { IPv4(202,81,99,0),24 }, + { IPv4(202,81,100,0),24 }, + { IPv4(202,81,101,0),24 }, + { IPv4(202,81,102,0),24 }, + { IPv4(202,81,108,0),24 }, + { IPv4(202,81,120,0),24 }, + { IPv4(202,81,121,0),24 }, + { IPv4(202,81,127,0),24 }, + { IPv4(202,83,32,0),19 }, + { IPv4(202,84,10,0),23 }, + { IPv4(202,84,16,0),24 }, + { IPv4(202,84,17,0),24 }, + { IPv4(202,84,146,0),23 }, + { IPv4(202,85,160,0),22 }, + { IPv4(202,88,135,0),24 }, + { IPv4(202,88,136,0),24 }, + { IPv4(202,88,149,0),24 }, + { IPv4(202,88,152,0),24 }, + { IPv4(202,88,160,0),24 }, + { IPv4(202,88,224,0),21 }, + { IPv4(202,88,232,0),21 }, + { IPv4(202,89,64,0),21 }, + { IPv4(202,89,96,0),19 }, + { IPv4(202,89,128,0),19 }, + { IPv4(202,89,196,0),24 }, + { IPv4(202,89,196,0),22 }, + { IPv4(202,89,197,0),24 }, + { IPv4(202,89,199,0),24 }, + { IPv4(202,89,200,0),24 }, + { IPv4(202,89,201,0),24 }, + { IPv4(202,90,192,0),24 }, + { IPv4(202,90,193,0),24 }, + { IPv4(202,90,194,0),24 }, + { IPv4(202,91,128,0),23 }, + { IPv4(202,91,130,0),23 }, + { IPv4(202,92,0,0),22 }, + { IPv4(202,92,32,0),23 }, + { IPv4(202,92,47,0),24 }, + { IPv4(202,92,64,0),19 }, + { IPv4(202,92,88,0),21 }, + { IPv4(202,92,96,0),19 }, + { IPv4(202,92,100,0),24 }, + { IPv4(202,93,0,0),20 }, + { IPv4(202,93,64,0),19 }, + { IPv4(202,93,128,0),19 }, + { IPv4(202,93,252,0),22 }, + { IPv4(202,94,0,0),19 }, + { IPv4(202,94,160,0),20 }, + { IPv4(202,95,0,0),22 }, + { IPv4(202,95,128,0),19 }, + { IPv4(202,95,144,0),23 }, + { IPv4(202,95,152,0),22 }, + { 
IPv4(202,95,156,0),22 }, + { IPv4(202,96,0,0),18 }, + { IPv4(202,96,64,0),19 }, + { IPv4(202,96,96,0),19 }, + { IPv4(202,96,128,0),18 }, + { IPv4(202,96,192,0),19 }, + { IPv4(202,96,192,0),18 }, + { IPv4(202,96,224,0),19 }, + { IPv4(202,97,0,0),19 }, + { IPv4(202,97,32,0),19 }, + { IPv4(202,97,96,0),20 }, + { IPv4(202,97,96,0),19 }, + { IPv4(202,97,128,0),19 }, + { IPv4(202,97,160,0),19 }, + { IPv4(202,98,0,0),19 }, + { IPv4(202,98,32,0),19 }, + { IPv4(202,98,64,0),19 }, + { IPv4(202,98,96,0),19 }, + { IPv4(202,98,128,0),19 }, + { IPv4(202,98,160,0),19 }, + { IPv4(202,98,192,0),19 }, + { IPv4(202,98,224,0),19 }, + { IPv4(202,99,0,0),18 }, + { IPv4(202,99,64,0),18 }, + { IPv4(202,99,128,0),19 }, + { IPv4(202,99,160,0),19 }, + { IPv4(202,100,64,0),19 }, + { IPv4(202,100,96,0),19 }, + { IPv4(202,100,128,0),19 }, + { IPv4(202,100,160,0),19 }, + { IPv4(202,100,192,0),19 }, + { IPv4(202,100,224,0),19 }, + { IPv4(202,101,0,0),16 }, + { IPv4(202,101,0,0),18 }, + { IPv4(202,101,64,0),19 }, + { IPv4(202,101,96,0),19 }, + { IPv4(202,101,128,0),19 }, + { IPv4(202,101,160,0),19 }, + { IPv4(202,101,192,0),18 }, + { IPv4(202,102,0,0),17 }, + { IPv4(202,102,128,0),18 }, + { IPv4(202,102,192,0),19 }, + { IPv4(202,102,224,0),19 }, + { IPv4(202,103,0,0),18 }, + { IPv4(202,103,64,0),19 }, + { IPv4(202,103,96,0),19 }, + { IPv4(202,103,128,0),18 }, + { IPv4(202,103,192,0),19 }, + { IPv4(202,103,224,0),19 }, + { IPv4(202,104,0,0),16 }, + { IPv4(202,105,0,0),16 }, + { IPv4(202,106,0,0),19 }, + { IPv4(202,106,32,0),20 }, + { IPv4(202,106,48,0),20 }, + { IPv4(202,106,64,0),18 }, + { IPv4(202,106,128,0),18 }, + { IPv4(202,106,192,0),19 }, + { IPv4(202,106,224,0),19 }, + { IPv4(202,107,0,0),17 }, + { IPv4(202,107,128,0),18 }, + { IPv4(202,107,192,0),18 }, + { IPv4(202,108,0,0),17 }, + { IPv4(202,108,128,0),17 }, + { IPv4(202,109,0,0),17 }, + { IPv4(202,109,128,0),18 }, + { IPv4(202,109,192,0),18 }, + { IPv4(202,110,0,0),18 }, + { IPv4(202,110,64,0),18 }, + { IPv4(202,110,128,0),18 }, + { IPv4(202,110,192,0),18 }, + { IPv4(202,111,0,0),17 }, + { IPv4(202,111,128,0),19 }, + { IPv4(202,111,160,0),19 }, + { IPv4(202,111,192,0),19 }, + { IPv4(202,111,224,0),19 }, + { IPv4(202,112,248,0),24 }, + { IPv4(202,122,1,0),24 }, + { IPv4(202,122,7,0),24 }, + { IPv4(202,122,128,0),24 }, + { IPv4(202,125,80,0),20 }, + { IPv4(202,127,0,0),23 }, + { IPv4(202,127,12,0),22 }, + { IPv4(202,127,16,0),20 }, + { IPv4(202,127,40,0),21 }, + { IPv4(202,127,48,0),23 }, + { IPv4(202,127,144,0),23 }, + { IPv4(202,127,157,0),24 }, + { IPv4(202,127,159,0),24 }, + { IPv4(202,127,160,0),21 }, + { IPv4(202,127,192,0),23 }, + { IPv4(202,127,200,0),21 }, + { IPv4(202,128,128,0),19 }, + { IPv4(202,128,132,0),24 }, + { IPv4(202,129,0,0),19 }, + { IPv4(202,129,192,0),19 }, + { IPv4(202,130,0,0),19 }, + { IPv4(202,130,70,0),23 }, + { IPv4(202,130,72,0),22 }, + { IPv4(202,130,76,0),23 }, + { IPv4(202,130,79,0),24 }, + { IPv4(202,130,80,0),23 }, + { IPv4(202,130,82,0),23 }, + { IPv4(202,130,96,0),19 }, + { IPv4(202,130,96,0),23 }, + { IPv4(202,130,104,0),23 }, + { IPv4(202,130,106,0),24 }, + { IPv4(202,130,224,0),20 }, + { IPv4(202,130,240,0),21 }, + { IPv4(202,130,248,0),21 }, + { IPv4(202,131,0,0),21 }, + { IPv4(202,131,114,0),24 }, + { IPv4(202,131,119,0),24 }, + { IPv4(202,131,144,0),20 }, + { IPv4(202,131,224,0),19 }, + { IPv4(202,133,3,0),24 }, + { IPv4(202,133,75,0),24 }, + { IPv4(202,133,79,0),24 }, + { IPv4(202,133,128,0),20 }, + { IPv4(202,133,144,0),20 }, + { IPv4(202,133,160,0),20 }, + { IPv4(202,133,224,0),19 }, + { IPv4(202,134,192,0),22 }, + { 
IPv4(202,134,196,0),22 }, + { IPv4(202,134,200,0),22 }, + { IPv4(202,134,204,0),22 }, + { IPv4(202,134,224,0),19 }, + { IPv4(202,135,0,0),16 }, + { IPv4(202,136,254,0),23 }, + { IPv4(202,137,0,0),21 }, + { IPv4(202,137,8,0),22 }, + { IPv4(202,137,8,0),21 }, + { IPv4(202,137,64,0),19 }, + { IPv4(202,137,128,0),19 }, + { IPv4(202,138,14,0),24 }, + { IPv4(202,138,48,0),21 }, + { IPv4(202,138,63,0),24 }, + { IPv4(202,138,128,0),18 }, + { IPv4(202,138,160,0),23 }, + { IPv4(202,138,202,0),23 }, + { IPv4(202,139,59,0),24 }, + { IPv4(202,139,173,0),24 }, + { IPv4(202,139,174,0),24 }, + { IPv4(202,140,0,0),19 }, + { IPv4(202,140,128,0),21 }, + { IPv4(202,140,144,0),24 }, + { IPv4(202,140,145,0),24 }, + { IPv4(202,140,146,0),24 }, + { IPv4(202,140,147,0),24 }, + { IPv4(202,140,150,0),23 }, + { IPv4(202,141,81,0),24 }, + { IPv4(202,141,216,0),21 }, + { IPv4(202,142,64,0),21 }, + { IPv4(202,142,88,0),21 }, + { IPv4(202,142,96,0),21 }, + { IPv4(202,143,48,0),21 }, + { IPv4(202,143,56,0),21 }, + { IPv4(202,143,128,0),19 }, + { IPv4(202,143,224,0),21 }, + { IPv4(202,144,8,0),24 }, + { IPv4(202,144,13,0),24 }, + { IPv4(202,144,14,0),24 }, + { IPv4(202,144,20,0),24 }, + { IPv4(202,144,22,0),24 }, + { IPv4(202,144,27,0),24 }, + { IPv4(202,144,28,0),24 }, + { IPv4(202,144,34,0),24 }, + { IPv4(202,144,35,0),24 }, + { IPv4(202,144,44,0),24 }, + { IPv4(202,144,48,0),20 }, + { IPv4(202,144,54,0),24 }, + { IPv4(202,144,55,0),24 }, + { IPv4(202,144,64,0),24 }, + { IPv4(202,144,65,0),24 }, + { IPv4(202,144,74,0),24 }, + { IPv4(202,144,75,0),24 }, + { IPv4(202,144,76,0),24 }, + { IPv4(202,144,77,0),24 }, + { IPv4(202,144,79,0),24 }, + { IPv4(202,144,83,0),24 }, + { IPv4(202,144,86,0),24 }, + { IPv4(202,144,91,0),24 }, + { IPv4(202,144,95,0),24 }, + { IPv4(202,144,96,0),24 }, + { IPv4(202,144,98,0),24 }, + { IPv4(202,144,99,0),24 }, + { IPv4(202,144,105,0),24 }, + { IPv4(202,144,109,0),24 }, + { IPv4(202,144,110,0),24 }, + { IPv4(202,144,119,0),24 }, + { IPv4(202,144,120,0),24 }, + { IPv4(202,144,125,0),24 }, + { IPv4(202,144,128,0),19 }, + { IPv4(202,145,0,0),22 }, + { IPv4(202,146,0,0),22 }, + { IPv4(202,146,4,0),23 }, + { IPv4(202,146,32,0),19 }, + { IPv4(202,146,144,0),24 }, + { IPv4(202,146,224,0),19 }, + { IPv4(202,146,226,0),24 }, + { IPv4(202,146,227,0),24 }, + { IPv4(202,146,228,0),23 }, + { IPv4(202,146,230,0),24 }, + { IPv4(202,146,231,0),24 }, + { IPv4(202,146,232,0),24 }, + { IPv4(202,146,236,0),24 }, + { IPv4(202,146,237,0),24 }, + { IPv4(202,146,239,0),24 }, + { IPv4(202,146,244,0),22 }, + { IPv4(202,146,253,0),24 }, + { IPv4(202,146,254,0),24 }, + { IPv4(202,146,255,0),24 }, + { IPv4(202,147,0,0),24 }, + { IPv4(202,147,128,0),19 }, + { IPv4(202,147,192,0),23 }, + { IPv4(202,147,194,0),23 }, + { IPv4(202,147,240,0),20 }, + { IPv4(202,148,0,0),22 }, + { IPv4(202,148,4,0),24 }, + { IPv4(202,148,5,0),24 }, + { IPv4(202,148,6,0),24 }, + { IPv4(202,148,7,0),24 }, + { IPv4(202,148,8,0),21 }, + { IPv4(202,148,11,0),24 }, + { IPv4(202,148,16,0),24 }, + { IPv4(202,148,17,0),24 }, + { IPv4(202,148,20,0),24 }, + { IPv4(202,149,79,0),24 }, + { IPv4(202,149,80,0),23 }, + { IPv4(202,149,82,0),24 }, + { IPv4(202,149,128,0),20 }, + { IPv4(202,149,128,0),19 }, + { IPv4(202,149,144,0),22 }, + { IPv4(202,149,148,0),22 }, + { IPv4(202,149,152,0),24 }, + { IPv4(202,149,208,0),21 }, + { IPv4(202,149,216,0),21 }, + { IPv4(202,149,216,0),24 }, + { IPv4(202,149,240,0),21 }, + { IPv4(202,149,248,0),21 }, + { IPv4(202,150,0,0),21 }, + { IPv4(202,150,8,0),21 }, + { IPv4(202,150,32,0),20 }, + { IPv4(202,150,46,0),24 }, + { 
IPv4(202,150,47,0),24 }, + { IPv4(202,150,64,0),19 }, + { IPv4(202,150,224,0),19 }, + { IPv4(202,151,32,0),24 }, + { IPv4(202,151,192,0),18 }, + { IPv4(202,152,0,0),19 }, + { IPv4(202,152,0,0),22 }, + { IPv4(202,152,4,0),22 }, + { IPv4(202,152,12,0),22 }, + { IPv4(202,152,16,0),22 }, + { IPv4(202,152,20,0),22 }, + { IPv4(202,152,24,0),22 }, + { IPv4(202,152,28,0),22 }, + { IPv4(202,152,32,0),20 }, + { IPv4(202,152,156,0),24 }, + { IPv4(202,152,224,0),19 }, + { IPv4(202,153,32,0),22 }, + { IPv4(202,153,42,0),23 }, + { IPv4(202,153,128,0),21 }, + { IPv4(202,153,224,0),23 }, + { IPv4(202,153,224,0),20 }, + { IPv4(202,153,240,0),20 }, + { IPv4(202,153,248,0),21 }, + { IPv4(202,154,0,0),22 }, + { IPv4(202,154,4,0),22 }, + { IPv4(202,154,4,0),24 }, + { IPv4(202,154,8,0),22 }, + { IPv4(202,154,12,0),22 }, + { IPv4(202,154,16,0),22 }, + { IPv4(202,154,16,0),20 }, + { IPv4(202,154,20,0),22 }, + { IPv4(202,154,24,0),22 }, + { IPv4(202,154,24,0),24 }, + { IPv4(202,154,28,0),22 }, + { IPv4(202,154,29,0),24 }, + { IPv4(202,154,32,0),20 }, + { IPv4(202,154,32,0),22 }, + { IPv4(202,154,36,0),22 }, + { IPv4(202,154,40,0),22 }, + { IPv4(202,154,42,0),24 }, + { IPv4(202,154,43,0),24 }, + { IPv4(202,154,44,0),22 }, + { IPv4(202,154,48,0),22 }, + { IPv4(202,154,48,0),20 }, + { IPv4(202,154,52,0),22 }, + { IPv4(202,154,56,0),22 }, + { IPv4(202,154,60,0),22 }, + { IPv4(202,154,64,0),20 }, + { IPv4(202,154,128,0),19 }, + { IPv4(202,154,192,0),19 }, + { IPv4(202,155,0,0),22 }, + { IPv4(202,155,3,0),24 }, + { IPv4(202,155,4,0),23 }, + { IPv4(202,155,6,0),24 }, + { IPv4(202,155,7,0),24 }, + { IPv4(202,155,8,0),24 }, + { IPv4(202,155,9,0),24 }, + { IPv4(202,155,10,0),23 }, + { IPv4(202,155,12,0),22 }, + { IPv4(202,155,16,0),22 }, + { IPv4(202,155,20,0),22 }, + { IPv4(202,155,24,0),23 }, + { IPv4(202,155,26,0),23 }, + { IPv4(202,155,28,0),23 }, + { IPv4(202,155,30,0),23 }, + { IPv4(202,155,32,0),23 }, + { IPv4(202,155,34,0),23 }, + { IPv4(202,155,36,0),23 }, + { IPv4(202,155,38,0),23 }, + { IPv4(202,155,40,0),22 }, + { IPv4(202,155,44,0),22 }, + { IPv4(202,155,48,0),24 }, + { IPv4(202,155,49,0),24 }, + { IPv4(202,155,50,0),23 }, + { IPv4(202,155,52,0),23 }, + { IPv4(202,155,54,0),23 }, + { IPv4(202,155,56,0),24 }, + { IPv4(202,155,57,0),24 }, + { IPv4(202,155,58,0),24 }, + { IPv4(202,155,59,0),24 }, + { IPv4(202,155,60,0),23 }, + { IPv4(202,155,62,0),23 }, + { IPv4(202,155,64,0),23 }, + { IPv4(202,155,66,0),23 }, + { IPv4(202,155,68,0),23 }, + { IPv4(202,155,70,0),23 }, + { IPv4(202,155,72,0),23 }, + { IPv4(202,155,74,0),23 }, + { IPv4(202,155,76,0),23 }, + { IPv4(202,155,78,0),23 }, + { IPv4(202,155,80,0),23 }, + { IPv4(202,155,82,0),23 }, + { IPv4(202,155,84,0),23 }, + { IPv4(202,155,86,0),24 }, + { IPv4(202,155,87,0),24 }, + { IPv4(202,155,88,0),24 }, + { IPv4(202,155,89,0),24 }, + { IPv4(202,155,90,0),23 }, + { IPv4(202,155,92,0),23 }, + { IPv4(202,155,93,0),24 }, + { IPv4(202,155,94,0),23 }, + { IPv4(202,155,96,0),23 }, + { IPv4(202,155,98,0),23 }, + { IPv4(202,155,100,0),23 }, + { IPv4(202,155,102,0),24 }, + { IPv4(202,155,103,0),24 }, + { IPv4(202,155,104,0),23 }, + { IPv4(202,155,106,0),23 }, + { IPv4(202,155,108,0),23 }, + { IPv4(202,155,110,0),23 }, + { IPv4(202,155,112,0),23 }, + { IPv4(202,155,114,0),23 }, + { IPv4(202,155,116,0),23 }, + { IPv4(202,155,118,0),23 }, + { IPv4(202,155,120,0),23 }, + { IPv4(202,155,122,0),23 }, + { IPv4(202,155,124,0),24 }, + { IPv4(202,155,125,0),24 }, + { IPv4(202,155,126,0),24 }, + { IPv4(202,155,127,0),24 }, + { IPv4(202,156,0,0),16 }, + { IPv4(202,156,0,0),19 }, + { 
IPv4(202,156,32,0),19 }, + { IPv4(202,156,64,0),19 }, + { IPv4(202,156,96,0),19 }, + { IPv4(202,156,128,0),19 }, + { IPv4(202,156,160,0),19 }, + { IPv4(202,156,192,0),19 }, + { IPv4(202,156,224,0),19 }, + { IPv4(202,157,0,0),23 }, + { IPv4(202,157,67,0),24 }, + { IPv4(202,157,128,0),19 }, + { IPv4(202,157,160,0),21 }, + { IPv4(202,157,182,0),23 }, + { IPv4(202,158,0,0),18 }, + { IPv4(202,158,0,0),19 }, + { IPv4(202,158,0,0),17 }, + { IPv4(202,158,24,0),22 }, + { IPv4(202,158,24,0),21 }, + { IPv4(202,158,28,0),22 }, + { IPv4(202,158,31,0),24 }, + { IPv4(202,158,32,0),21 }, + { IPv4(202,158,32,0),19 }, + { IPv4(202,158,36,0),22 }, + { IPv4(202,158,40,0),21 }, + { IPv4(202,158,48,0),21 }, + { IPv4(202,158,48,0),22 }, + { IPv4(202,158,52,0),22 }, + { IPv4(202,158,56,0),21 }, + { IPv4(202,158,64,0),21 }, + { IPv4(202,158,64,0),19 }, + { IPv4(202,158,72,0),21 }, + { IPv4(202,158,80,0),21 }, + { IPv4(202,158,80,0),22 }, + { IPv4(202,158,80,0),24 }, + { IPv4(202,158,80,0),23 }, + { IPv4(202,158,82,0),23 }, + { IPv4(202,158,84,0),22 }, + { IPv4(202,158,88,0),22 }, + { IPv4(202,158,92,0),22 }, + { IPv4(202,158,96,0),21 }, + { IPv4(202,158,96,0),19 }, + { IPv4(202,158,96,0),20 }, + { IPv4(202,158,96,0),22 }, + { IPv4(202,158,100,0),22 }, + { IPv4(202,158,104,0),22 }, + { IPv4(202,158,104,0),21 }, + { IPv4(202,158,108,0),22 }, + { IPv4(202,158,112,0),20 }, + { IPv4(202,158,112,0),21 }, + { IPv4(202,158,112,0),22 }, + { IPv4(202,158,116,0),22 }, + { IPv4(202,158,120,0),22 }, + { IPv4(202,158,120,0),21 }, + { IPv4(202,158,124,0),22 }, + { IPv4(202,159,0,0),19 }, + { IPv4(202,159,32,0),22 }, + { IPv4(202,159,36,0),24 }, + { IPv4(202,159,37,0),24 }, + { IPv4(202,159,38,0),23 }, + { IPv4(202,159,40,0),22 }, + { IPv4(202,159,44,0),23 }, + { IPv4(202,159,46,0),24 }, + { IPv4(202,159,47,0),24 }, + { IPv4(202,159,48,0),20 }, + { IPv4(202,159,64,0),19 }, + { IPv4(202,159,96,0),19 }, + { IPv4(202,160,0,0),19 }, + { IPv4(202,160,64,0),19 }, + { IPv4(202,160,224,0),19 }, + { IPv4(202,160,235,0),24 }, + { IPv4(202,161,0,0),21 }, + { IPv4(202,161,31,0),24 }, + { IPv4(202,161,32,0),19 }, + { IPv4(202,161,128,0),19 }, + { IPv4(202,161,160,0),20 }, + { IPv4(202,162,192,0),20 }, + { IPv4(202,163,96,0),19 }, + { IPv4(202,163,128,0),24 }, + { IPv4(202,163,129,0),24 }, + { IPv4(202,163,130,0),24 }, + { IPv4(202,163,131,0),24 }, + { IPv4(202,163,132,0),24 }, + { IPv4(202,163,224,0),19 }, + { IPv4(202,163,234,0),24 }, + { IPv4(202,163,240,0),20 }, + { IPv4(202,163,248,0),21 }, + { IPv4(202,163,248,0),24 }, + { IPv4(202,164,32,0),21 }, + { IPv4(202,164,96,0),19 }, + { IPv4(202,164,160,0),19 }, + { IPv4(202,164,185,0),24 }, + { IPv4(202,165,0,0),19 }, + { IPv4(202,165,40,0),21 }, + { IPv4(202,165,64,0),19 }, + { IPv4(202,165,64,0),20 }, + { IPv4(202,165,70,0),23 }, + { IPv4(202,165,80,0),20 }, + { IPv4(202,165,225,0),24 }, + { IPv4(202,165,230,0),24 }, + { IPv4(202,165,231,0),24 }, + { IPv4(202,165,246,0),24 }, + { IPv4(202,166,0,0),17 }, + { IPv4(202,166,160,0),19 }, + { IPv4(202,166,192,0),18 }, + { IPv4(202,167,4,0),24 }, + { IPv4(202,168,192,0),20 }, + { IPv4(202,168,254,0),23 }, + { IPv4(202,169,128,0),18 }, + { IPv4(202,169,224,0),20 }, + { IPv4(202,171,64,0),24 }, + { IPv4(202,171,65,0),24 }, + { IPv4(202,171,66,0),24 }, + { IPv4(202,171,67,0),24 }, + { IPv4(202,171,68,0),24 }, + { IPv4(202,171,69,0),24 }, + { IPv4(202,171,70,0),24 }, + { IPv4(202,171,71,0),24 }, + { IPv4(202,171,72,0),24 }, + { IPv4(202,171,73,0),24 }, + { IPv4(202,171,74,0),24 }, + { IPv4(202,171,75,0),24 }, + { IPv4(202,171,76,0),24 }, + { 
IPv4(202,171,77,0),24 }, + { IPv4(202,171,78,0),24 }, + { IPv4(202,171,192,0),20 }, + { IPv4(202,172,106,0),24 }, + { IPv4(202,172,120,0),24 }, + { IPv4(202,172,121,0),24 }, + { IPv4(202,172,122,0),24 }, + { IPv4(202,172,123,0),24 }, + { IPv4(202,172,124,0),24 }, + { IPv4(202,172,210,0),24 }, + { IPv4(202,172,224,0),19 }, + { IPv4(202,173,32,0),19 }, + { IPv4(202,173,64,0),22 }, + { IPv4(202,173,69,0),24 }, + { IPv4(202,173,70,0),24 }, + { IPv4(202,174,144,0),24 }, + { IPv4(202,177,0,0),19 }, + { IPv4(202,177,128,0),20 }, + { IPv4(202,177,128,0),19 }, + { IPv4(202,177,136,0),23 }, + { IPv4(202,177,138,0),23 }, + { IPv4(202,177,140,0),22 }, + { IPv4(202,177,144,0),20 }, + { IPv4(202,177,150,0),23 }, + { IPv4(202,177,156,0),22 }, + { IPv4(202,177,160,0),19 }, + { IPv4(202,177,160,0),23 }, + { IPv4(202,177,170,0),23 }, + { IPv4(202,178,128,0),18 }, + { IPv4(202,178,128,0),17 }, + { IPv4(202,178,192,0),19 }, + { IPv4(202,178,224,0),19 }, + { IPv4(202,179,0,0),19 }, + { IPv4(202,179,64,0),23 }, + { IPv4(202,179,66,0),24 }, + { IPv4(202,179,137,0),24 }, + { IPv4(202,179,147,0),24 }, + { IPv4(202,179,150,0),24 }, + { IPv4(202,179,154,0),24 }, + { IPv4(202,179,157,0),24 }, + { IPv4(202,179,158,0),24 }, + { IPv4(202,180,0,0),20 }, + { IPv4(202,180,0,0),24 }, + { IPv4(202,180,1,0),24 }, + { IPv4(202,180,10,0),24 }, + { IPv4(202,180,11,0),24 }, + { IPv4(202,180,12,0),24 }, + { IPv4(202,180,13,0),24 }, + { IPv4(202,180,16,0),21 }, + { IPv4(202,180,21,0),24 }, + { IPv4(202,180,24,0),22 }, + { IPv4(202,180,28,0),22 }, + { IPv4(202,180,64,0),18 }, + { IPv4(202,180,64,0),19 }, + { IPv4(202,180,96,0),19 }, + { IPv4(202,181,136,0),21 }, + { IPv4(202,181,144,0),20 }, + { IPv4(202,181,184,0),21 }, + { IPv4(202,181,216,0),21 }, + { IPv4(202,182,0,0),19 }, + { IPv4(202,182,16,0),20 }, + { IPv4(202,182,224,0),24 }, + { IPv4(202,182,225,0),24 }, + { IPv4(202,183,0,0),19 }, + { IPv4(202,183,128,0),17 }, + { IPv4(202,183,188,0),22 }, + { IPv4(202,183,192,0),18 }, + { IPv4(202,183,214,0),24 }, + { IPv4(202,183,233,0),24 }, + { IPv4(202,183,234,0),24 }, + { IPv4(202,184,0,0),15 }, + { IPv4(202,186,0,0),15 }, + { IPv4(202,188,0,0),17 }, + { IPv4(202,188,0,0),16 }, + { IPv4(202,188,128,0),17 }, + { IPv4(202,189,0,0),18 }, + { IPv4(202,190,0,0),16 }, + { IPv4(202,208,160,0),19 }, + { IPv4(202,208,192,0),19 }, + { IPv4(202,208,224,0),19 }, + { IPv4(202,210,11,0),24 }, + { IPv4(202,210,60,0),22 }, + { IPv4(202,210,64,0),18 }, + { IPv4(202,211,128,0),17 }, + { IPv4(202,213,0,0),22 }, + { IPv4(202,213,17,0),24 }, + { IPv4(202,213,160,0),20 }, + { IPv4(202,215,0,0),16 }, + { IPv4(202,216,0,0),19 }, + { IPv4(202,217,128,0),17 }, + { IPv4(202,220,6,0),23 }, + { IPv4(202,220,37,0),24 }, + { IPv4(202,220,40,0),21 }, + { IPv4(202,220,70,0),23 }, + { IPv4(202,220,93,0),24 }, + { IPv4(202,220,124,0),22 }, + { IPv4(202,220,160,0),19 }, + { IPv4(202,222,0,0),20 }, + { IPv4(202,222,192,0),18 }, + { IPv4(202,224,64,0),19 }, + { IPv4(202,225,0,0),16 }, + { IPv4(202,227,0,0),18 }, + { IPv4(202,227,192,0),18 }, + { IPv4(202,228,0,0),18 }, + { IPv4(202,228,128,0),18 }, + { IPv4(202,231,64,0),18 }, + { IPv4(202,231,128,0),19 }, + { IPv4(202,231,160,0),19 }, + { IPv4(202,235,0,0),18 }, + { IPv4(202,236,36,0),23 }, + { IPv4(202,236,144,0),23 }, + { IPv4(202,236,160,0),23 }, + { IPv4(202,236,167,0),24 }, + { IPv4(202,237,0,0),23 }, + { IPv4(202,237,13,0),24 }, + { IPv4(202,237,115,0),24 }, + { IPv4(202,237,147,0),24 }, + { IPv4(202,237,154,0),24 }, + { IPv4(202,237,175,0),24 }, + { IPv4(202,237,176,0),22 }, + { IPv4(202,237,192,0),19 }, + { 
IPv4(202,238,32,0),20 }, + { IPv4(202,238,128,0),18 }, + { IPv4(202,239,128,0),18 }, + { IPv4(202,239,192,0),18 }, + { IPv4(202,240,112,0),22 }, + { IPv4(202,240,176,0),23 }, + { IPv4(202,241,0,0),17 }, + { IPv4(202,242,5,0),24 }, + { IPv4(202,242,18,0),23 }, + { IPv4(202,242,20,0),24 }, + { IPv4(202,242,57,0),24 }, + { IPv4(202,242,76,0),23 }, + { IPv4(202,242,78,0),23 }, + { IPv4(202,242,132,0),22 }, + { IPv4(202,242,240,0),23 }, + { IPv4(202,243,104,0),24 }, + { IPv4(202,243,105,0),24 }, + { IPv4(202,243,186,0),24 }, + { IPv4(202,243,216,0),24 }, + { IPv4(202,244,4,0),24 }, + { IPv4(202,244,32,0),21 }, + { IPv4(202,244,58,0),24 }, + { IPv4(202,244,70,0),24 }, + { IPv4(202,244,71,0),24 }, + { IPv4(202,244,93,0),24 }, + { IPv4(202,244,95,0),24 }, + { IPv4(202,244,152,0),24 }, + { IPv4(202,244,160,0),19 }, + { IPv4(202,245,131,0),24 }, + { IPv4(202,245,142,0),24 }, + { IPv4(202,245,148,0),23 }, + { IPv4(202,245,153,0),24 }, + { IPv4(202,245,162,0),24 }, + { IPv4(202,245,172,0),23 }, + { IPv4(202,245,174,0),24 }, + { IPv4(202,245,244,0),24 }, + { IPv4(202,245,254,0),24 }, + { IPv4(202,246,4,0),22 }, + { IPv4(202,246,14,0),24 }, + { IPv4(202,246,20,0),22 }, + { IPv4(202,246,54,0),24 }, + { IPv4(202,246,114,0),24 }, + { IPv4(202,246,160,0),22 }, + { IPv4(202,246,164,0),24 }, + { IPv4(202,246,244,0),22 }, + { IPv4(202,246,248,0),21 }, + { IPv4(202,247,0,0),17 }, + { IPv4(202,249,0,0),17 }, + { IPv4(202,250,75,0),24 }, + { IPv4(202,250,219,0),24 }, + { IPv4(202,250,236,0),24 }, + { IPv4(202,251,241,0),24 }, + { IPv4(202,252,96,0),21 }, + { IPv4(202,252,116,0),22 }, + { IPv4(202,252,206,0),24 }, + { IPv4(202,253,104,0),24 }, + { IPv4(202,253,208,0),24 }, + { IPv4(202,253,223,0),24 }, + { IPv4(202,253,243,0),24 }, + { IPv4(202,254,64,0),23 }, + { IPv4(202,254,106,0),24 }, + { IPv4(202,254,111,0),24 }, + { IPv4(202,255,16,0),21 }, + { IPv4(202,255,40,0),22 }, + { IPv4(202,255,44,0),22 }, + { IPv4(202,255,72,0),22 }, + { IPv4(202,255,204,0),22 }, + { IPv4(203,0,12,0),24 }, + { IPv4(203,0,15,0),24 }, + { IPv4(203,0,25,0),24 }, + { IPv4(203,0,27,0),24 }, + { IPv4(203,0,31,0),24 }, + { IPv4(203,0,38,0),24 }, + { IPv4(203,0,41,0),24 }, + { IPv4(203,0,98,0),24 }, + { IPv4(203,0,112,0),24 }, + { IPv4(203,0,124,0),22 }, + { IPv4(203,0,145,0),24 }, + { IPv4(203,0,146,0),23 }, + { IPv4(203,0,148,0),23 }, + { IPv4(203,0,154,0),24 }, + { IPv4(203,0,155,0),24 }, + { IPv4(203,0,225,0),24 }, + { IPv4(203,1,24,0),24 }, + { IPv4(203,1,89,0),24 }, + { IPv4(203,1,109,0),24 }, + { IPv4(203,1,237,0),24 }, + { IPv4(203,1,250,0),24 }, + { IPv4(203,1,251,0),24 }, + { IPv4(203,1,255,0),24 }, + { IPv4(203,2,228,0),24 }, + { IPv4(203,3,44,0),24 }, + { IPv4(203,3,71,0),24 }, + { IPv4(203,3,79,0),24 }, + { IPv4(203,3,101,0),24 }, + { IPv4(203,3,127,0),24 }, + { IPv4(203,3,129,0),24 }, + { IPv4(203,3,134,0),24 }, + { IPv4(203,3,138,0),24 }, + { IPv4(203,3,144,0),20 }, + { IPv4(203,4,148,0),23 }, + { IPv4(203,4,161,0),24 }, + { IPv4(203,4,185,0),24 }, + { IPv4(203,4,190,0),24 }, + { IPv4(203,4,192,0),21 }, + { IPv4(203,4,224,0),24 }, + { IPv4(203,5,6,0),24 }, + { IPv4(203,5,13,0),24 }, + { IPv4(203,5,20,0),24 }, + { IPv4(203,5,23,0),24 }, + { IPv4(203,5,24,0),24 }, + { IPv4(203,5,30,0),24 }, + { IPv4(203,5,31,0),24 }, + { IPv4(203,5,62,0),24 }, + { IPv4(203,5,78,0),23 }, + { IPv4(203,5,127,0),24 }, + { IPv4(203,5,168,0),22 }, + { IPv4(203,5,248,0),24 }, + { IPv4(203,5,249,0),24 }, + { IPv4(203,5,250,0),24 }, + { IPv4(203,5,251,0),24 }, + { IPv4(203,6,135,0),24 }, + { IPv4(203,6,156,0),24 }, + { IPv4(203,7,132,0),24 }, + { 
IPv4(203,7,133,0),24 }, + { IPv4(203,7,134,0),24 }, + { IPv4(203,7,135,0),24 }, + { IPv4(203,7,137,0),24 }, + { IPv4(203,7,198,0),24 }, + { IPv4(203,7,208,0),20 }, + { IPv4(203,7,255,0),24 }, + { IPv4(203,8,1,0),24 }, + { IPv4(203,8,4,0),24 }, + { IPv4(203,8,4,0),22 }, + { IPv4(203,8,5,0),24 }, + { IPv4(203,8,6,0),24 }, + { IPv4(203,8,7,0),24 }, + { IPv4(203,8,12,0),22 }, + { IPv4(203,8,20,0),24 }, + { IPv4(203,8,71,0),24 }, + { IPv4(203,8,84,0),24 }, + { IPv4(203,8,94,0),24 }, + { IPv4(203,8,113,0),24 }, + { IPv4(203,8,114,0),24 }, + { IPv4(203,8,163,0),24 }, + { IPv4(203,8,164,0),24 }, + { IPv4(203,8,170,0),24 }, + { IPv4(203,8,171,0),24 }, + { IPv4(203,8,174,0),24 }, + { IPv4(203,8,176,0),21 }, + { IPv4(203,8,185,0),24 }, + { IPv4(203,8,194,0),24 }, + { IPv4(203,8,200,0),24 }, + { IPv4(203,8,201,0),24 }, + { IPv4(203,8,202,0),24 }, + { IPv4(203,9,35,0),24 }, + { IPv4(203,9,68,0),22 }, + { IPv4(203,9,84,0),24 }, + { IPv4(203,9,102,0),24 }, + { IPv4(203,9,124,0),24 }, + { IPv4(203,9,125,0),24 }, + { IPv4(203,9,151,0),24 }, + { IPv4(203,9,157,0),24 }, + { IPv4(203,9,190,0),23 }, + { IPv4(203,10,36,0),24 }, + { IPv4(203,10,78,0),24 }, + { IPv4(203,11,75,0),24 }, + { IPv4(203,11,81,0),24 }, + { IPv4(203,11,128,0),22 }, + { IPv4(203,11,132,0),22 }, + { IPv4(203,11,140,0),24 }, + { IPv4(203,11,167,0),24 }, + { IPv4(203,11,177,0),24 }, + { IPv4(203,11,178,0),23 }, + { IPv4(203,11,222,0),23 }, + { IPv4(203,12,30,0),24 }, + { IPv4(203,12,31,0),24 }, + { IPv4(203,12,42,0),24 }, + { IPv4(203,12,48,0),22 }, + { IPv4(203,12,51,0),24 }, + { IPv4(203,12,83,0),24 }, + { IPv4(203,12,97,0),24 }, + { IPv4(203,12,115,0),24 }, + { IPv4(203,12,144,0),21 }, + { IPv4(203,12,163,0),24 }, + { IPv4(203,12,172,0),22 }, + { IPv4(203,12,216,0),23 }, + { IPv4(203,12,235,0),24 }, + { IPv4(203,12,236,0),24 }, + { IPv4(203,12,236,0),22 }, + { IPv4(203,12,237,0),24 }, + { IPv4(203,12,238,0),24 }, + { IPv4(203,12,239,0),24 }, + { IPv4(203,13,23,0),24 }, + { IPv4(203,13,25,0),24 }, + { IPv4(203,13,35,0),24 }, + { IPv4(203,13,74,0),24 }, + { IPv4(203,13,144,0),24 }, + { IPv4(203,13,174,0),24 }, + { IPv4(203,13,220,0),23 }, + { IPv4(203,14,59,0),24 }, + { IPv4(203,14,105,0),24 }, + { IPv4(203,14,111,0),24 }, + { IPv4(203,14,167,0),24 }, + { IPv4(203,14,177,0),24 }, + { IPv4(203,14,180,0),24 }, + { IPv4(203,14,202,0),24 }, + { IPv4(203,14,212,0),24 }, + { IPv4(203,14,223,0),24 }, + { IPv4(203,15,68,0),24 }, + { IPv4(203,15,69,0),24 }, + { IPv4(203,15,95,0),24 }, + { IPv4(203,15,104,0),24 }, + { IPv4(203,15,108,0),24 }, + { IPv4(203,15,120,0),23 }, + { IPv4(203,15,134,0),23 }, + { IPv4(203,15,138,0),24 }, + { IPv4(203,15,141,0),24 }, + { IPv4(203,15,142,0),24 }, + { IPv4(203,15,148,0),24 }, + { IPv4(203,15,152,0),24 }, + { IPv4(203,15,153,0),24 }, + { IPv4(203,15,249,0),24 }, + { IPv4(203,15,251,0),24 }, + { IPv4(203,15,252,0),24 }, + { IPv4(203,16,26,0),24 }, + { IPv4(203,16,33,0),24 }, + { IPv4(203,16,35,0),24 }, + { IPv4(203,16,52,0),23 }, + { IPv4(203,16,54,0),24 }, + { IPv4(203,16,60,0),24 }, + { IPv4(203,16,61,0),24 }, + { IPv4(203,16,139,0),24 }, + { IPv4(203,16,143,0),24 }, + { IPv4(203,16,169,0),24 }, + { IPv4(203,16,170,0),24 }, + { IPv4(203,16,176,0),24 }, + { IPv4(203,16,180,0),22 }, + { IPv4(203,16,192,0),23 }, + { IPv4(203,16,225,0),24 }, + { IPv4(203,16,226,0),24 }, + { IPv4(203,16,232,0),24 }, + { IPv4(203,16,233,0),24 }, + { IPv4(203,16,246,0),24 }, + { IPv4(203,17,19,0),24 }, + { IPv4(203,17,22,0),24 }, + { IPv4(203,17,40,0),21 }, + { IPv4(203,17,43,0),24 }, + { IPv4(203,17,54,0),24 }, + { IPv4(203,17,69,0),24 
}, + { IPv4(203,17,71,0),24 }, + { IPv4(203,17,112,0),24 }, + { IPv4(203,17,113,0),24 }, + { IPv4(203,17,123,0),24 }, + { IPv4(203,17,125,0),24 }, + { IPv4(203,17,162,0),24 }, + { IPv4(203,17,165,0),24 }, + { IPv4(203,17,167,0),24 }, + { IPv4(203,17,168,0),21 }, + { IPv4(203,17,183,0),24 }, + { IPv4(203,17,192,0),24 }, + { IPv4(203,17,253,0),24 }, + { IPv4(203,18,0,0),24 }, + { IPv4(203,18,6,0),24 }, + { IPv4(203,18,22,0),24 }, + { IPv4(203,18,28,0),24 }, + { IPv4(203,18,38,0),24 }, + { IPv4(203,18,39,0),24 }, + { IPv4(203,18,143,0),24 }, + { IPv4(203,18,174,0),24 }, + { IPv4(203,18,246,0),24 }, + { IPv4(203,19,0,0),24 }, + { IPv4(203,19,2,0),24 }, + { IPv4(203,19,4,0),24 }, + { IPv4(203,19,12,0),24 }, + { IPv4(203,19,22,0),24 }, + { IPv4(203,19,31,0),24 }, + { IPv4(203,19,47,0),24 }, + { IPv4(203,19,53,0),24 }, + { IPv4(203,19,75,0),24 }, + { IPv4(203,19,80,0),24 }, + { IPv4(203,19,87,0),24 }, + { IPv4(203,19,88,0),24 }, + { IPv4(203,19,120,0),24 }, + { IPv4(203,19,121,0),24 }, + { IPv4(203,19,122,0),24 }, + { IPv4(203,19,124,0),24 }, + { IPv4(203,19,126,0),24 }, + { IPv4(203,19,127,0),24 }, + { IPv4(203,19,132,0),24 }, + { IPv4(203,19,147,0),24 }, + { IPv4(203,19,157,0),24 }, + { IPv4(203,19,243,0),24 }, + { IPv4(203,19,251,0),24 }, + { IPv4(203,19,252,0),24 }, + { IPv4(203,20,25,0),24 }, + { IPv4(203,20,32,0),24 }, + { IPv4(203,20,36,0),24 }, + { IPv4(203,20,44,0),24 }, + { IPv4(203,20,45,0),24 }, + { IPv4(203,20,52,0),24 }, + { IPv4(203,20,53,0),24 }, + { IPv4(203,20,62,0),24 }, + { IPv4(203,20,72,0),24 }, + { IPv4(203,20,80,0),24 }, + { IPv4(203,20,97,0),24 }, + { IPv4(203,20,99,0),24 }, + { IPv4(203,20,102,0),23 }, + { IPv4(203,20,115,0),24 }, + { IPv4(203,20,125,0),24 }, + { IPv4(203,20,234,0),24 }, + { IPv4(203,20,244,0),24 }, + { IPv4(203,20,245,0),24 }, + { IPv4(203,21,20,0),24 }, + { IPv4(203,21,46,0),24 }, + { IPv4(203,21,67,0),24 }, + { IPv4(203,21,122,0),24 }, + { IPv4(203,21,123,0),24 }, + { IPv4(203,21,125,0),24 }, + { IPv4(203,21,127,0),24 }, + { IPv4(203,21,132,0),24 }, + { IPv4(203,21,134,0),24 }, + { IPv4(203,21,150,0),23 }, + { IPv4(203,21,216,0),24 }, + { IPv4(203,22,18,0),24 }, + { IPv4(203,22,19,0),24 }, + { IPv4(203,22,70,0),24 }, + { IPv4(203,22,82,0),24 }, + { IPv4(203,22,110,0),23 }, + { IPv4(203,22,130,0),24 }, + { IPv4(203,22,132,0),24 }, + { IPv4(203,22,192,0),24 }, + { IPv4(203,22,205,0),24 }, + { IPv4(203,22,214,0),24 }, + { IPv4(203,22,229,0),24 }, + { IPv4(203,22,249,0),24 }, + { IPv4(203,22,254,0),24 }, + { IPv4(203,23,3,0),24 }, + { IPv4(203,23,14,0),24 }, + { IPv4(203,23,17,0),24 }, + { IPv4(203,23,29,0),24 }, + { IPv4(203,23,32,0),22 }, + { IPv4(203,23,42,0),24 }, + { IPv4(203,23,43,0),24 }, + { IPv4(203,23,50,0),24 }, + { IPv4(203,23,53,0),24 }, + { IPv4(203,23,77,0),24 }, + { IPv4(203,23,78,0),23 }, + { IPv4(203,23,83,0),24 }, + { IPv4(203,23,87,0),24 }, + { IPv4(203,23,88,0),23 }, + { IPv4(203,23,97,0),24 }, + { IPv4(203,23,111,0),24 }, + { IPv4(203,23,155,0),24 }, + { IPv4(203,23,156,0),23 }, + { IPv4(203,23,164,0),24 }, + { IPv4(203,23,165,0),24 }, + { IPv4(203,23,166,0),24 }, + { IPv4(203,23,175,0),24 }, + { IPv4(203,23,186,0),24 }, + { IPv4(203,23,190,0),24 }, + { IPv4(203,23,200,0),24 }, + { IPv4(203,23,201,0),24 }, + { IPv4(203,23,202,0),24 }, + { IPv4(203,23,203,0),24 }, + { IPv4(203,23,225,0),24 }, + { IPv4(203,23,236,0),24 }, + { IPv4(203,23,237,0),24 }, + { IPv4(203,23,238,0),24 }, + { IPv4(203,23,239,0),24 }, + { IPv4(203,24,19,0),24 }, + { IPv4(203,24,52,0),24 }, + { IPv4(203,24,53,0),24 }, + { IPv4(203,24,62,0),24 }, + { 
IPv4(203,24,66,0),24 }, + { IPv4(203,24,70,0),23 }, + { IPv4(203,24,75,0),24 }, + { IPv4(203,24,82,0),23 }, + { IPv4(203,24,91,0),24 }, + { IPv4(203,24,95,0),24 }, + { IPv4(203,24,105,0),24 }, + { IPv4(203,24,107,0),24 }, + { IPv4(203,24,110,0),24 }, + { IPv4(203,24,126,0),24 }, + { IPv4(203,24,127,0),24 }, + { IPv4(203,24,134,0),23 }, + { IPv4(203,24,150,0),24 }, + { IPv4(203,24,163,0),24 }, + { IPv4(203,24,214,0),24 }, + { IPv4(203,24,215,0),24 }, + { IPv4(203,24,218,0),24 }, + { IPv4(203,24,241,0),24 }, + { IPv4(203,24,246,0),24 }, + { IPv4(203,24,251,0),24 }, + { IPv4(203,25,25,0),24 }, + { IPv4(203,25,67,0),24 }, + { IPv4(203,25,76,0),24 }, + { IPv4(203,25,79,0),24 }, + { IPv4(203,25,84,0),23 }, + { IPv4(203,25,96,0),24 }, + { IPv4(203,25,110,0),23 }, + { IPv4(203,25,119,0),24 }, + { IPv4(203,25,120,0),24 }, + { IPv4(203,25,129,0),24 }, + { IPv4(203,25,130,0),24 }, + { IPv4(203,25,148,0),24 }, + { IPv4(203,25,159,0),24 }, + { IPv4(203,25,165,0),24 }, + { IPv4(203,25,178,0),24 }, + { IPv4(203,25,183,0),24 }, + { IPv4(203,25,188,0),24 }, + { IPv4(203,25,189,0),24 }, + { IPv4(203,25,192,0),24 }, + { IPv4(203,25,193,0),24 }, + { IPv4(203,25,195,0),24 }, + { IPv4(203,26,8,0),22 }, + { IPv4(203,26,18,0),24 }, + { IPv4(203,26,20,0),24 }, + { IPv4(203,26,21,0),24 }, + { IPv4(203,26,25,0),24 }, + { IPv4(203,26,26,0),24 }, + { IPv4(203,26,28,0),24 }, + { IPv4(203,26,37,0),24 }, + { IPv4(203,26,38,0),24 }, + { IPv4(203,26,39,0),24 }, + { IPv4(203,26,45,0),24 }, + { IPv4(203,26,47,0),24 }, + { IPv4(203,26,54,0),24 }, + { IPv4(203,26,66,0),23 }, + { IPv4(203,26,79,0),24 }, + { IPv4(203,26,82,0),23 }, + { IPv4(203,26,89,0),24 }, + { IPv4(203,26,108,0),24 }, + { IPv4(203,26,112,0),24 }, + { IPv4(203,26,127,0),24 }, + { IPv4(203,26,130,0),24 }, + { IPv4(203,26,141,0),24 }, + { IPv4(203,26,215,0),24 }, + { IPv4(203,26,216,0),24 }, + { IPv4(203,26,225,0),24 }, + { IPv4(203,26,237,0),24 }, + { IPv4(203,26,240,0),23 }, + { IPv4(203,26,247,0),24 }, + { IPv4(203,27,1,0),24 }, + { IPv4(203,27,5,0),24 }, + { IPv4(203,27,9,0),24 }, + { IPv4(203,27,18,0),24 }, + { IPv4(203,27,47,0),24 }, + { IPv4(203,27,49,0),24 }, + { IPv4(203,27,51,0),24 }, + { IPv4(203,27,68,0),24 }, + { IPv4(203,27,69,0),24 }, + { IPv4(203,27,85,0),24 }, + { IPv4(203,27,87,0),24 }, + { IPv4(203,27,90,0),24 }, + { IPv4(203,27,91,0),24 }, + { IPv4(203,27,92,0),24 }, + { IPv4(203,27,100,0),24 }, + { IPv4(203,27,104,0),23 }, + { IPv4(203,27,192,0),24 }, + { IPv4(203,27,203,0),24 }, + { IPv4(203,27,208,0),24 }, + { IPv4(203,27,209,0),24 }, + { IPv4(203,27,210,0),24 }, + { IPv4(203,27,211,0),24 }, + { IPv4(203,27,212,0),24 }, + { IPv4(203,27,213,0),24 }, + { IPv4(203,27,214,0),24 }, + { IPv4(203,27,215,0),24 }, + { IPv4(203,27,222,0),24 }, + { IPv4(203,27,231,0),24 }, + { IPv4(203,27,242,0),24 }, + { IPv4(203,27,248,0),24 }, + { IPv4(203,28,17,0),24 }, + { IPv4(203,28,22,0),24 }, + { IPv4(203,28,32,0),24 }, + { IPv4(203,28,52,0),23 }, + { IPv4(203,28,94,0),23 }, + { IPv4(203,28,95,0),24 }, + { IPv4(203,28,116,0),22 }, + { IPv4(203,28,134,0),23 }, + { IPv4(203,28,147,0),24 }, + { IPv4(203,28,171,0),24 }, + { IPv4(203,28,173,0),24 }, + { IPv4(203,28,193,0),24 }, + { IPv4(203,28,207,0),24 }, + { IPv4(203,28,208,0),24 }, + { IPv4(203,28,209,0),24 }, + { IPv4(203,28,210,0),24 }, + { IPv4(203,28,211,0),24 }, + { IPv4(203,28,232,0),24 }, + { IPv4(203,28,238,0),24 }, + { IPv4(203,29,3,0),24 }, + { IPv4(203,29,19,0),24 }, + { IPv4(203,29,65,0),24 }, + { IPv4(203,29,70,0),24 }, + { IPv4(203,29,74,0),24 }, + { IPv4(203,29,91,0),24 }, + { 
IPv4(203,29,92,0),24 }, + { IPv4(203,29,93,0),24 }, + { IPv4(203,29,114,0),23 }, + { IPv4(203,29,119,0),24 }, + { IPv4(203,29,125,0),24 }, + { IPv4(203,29,127,0),24 }, + { IPv4(203,29,128,0),24 }, + { IPv4(203,29,130,0),24 }, + { IPv4(203,29,138,0),24 }, + { IPv4(203,29,140,0),24 }, + { IPv4(203,29,141,0),24 }, + { IPv4(203,29,142,0),24 }, + { IPv4(203,29,150,0),24 }, + { IPv4(203,29,151,0),24 }, + { IPv4(203,29,153,0),24 }, + { IPv4(203,29,155,0),24 }, + { IPv4(203,29,156,0),24 }, + { IPv4(203,29,159,0),24 }, + { IPv4(203,29,181,0),24 }, + { IPv4(203,29,184,0),24 }, + { IPv4(203,29,189,0),24 }, + { IPv4(203,29,190,0),24 }, + { IPv4(203,29,218,0),24 }, + { IPv4(203,29,221,0),24 }, + { IPv4(203,29,224,0),23 }, + { IPv4(203,29,230,0),24 }, + { IPv4(203,29,236,0),24 }, + { IPv4(203,29,243,0),24 }, + { IPv4(203,29,250,0),23 }, + { IPv4(203,30,14,0),24 }, + { IPv4(203,30,15,0),24 }, + { IPv4(203,30,19,0),24 }, + { IPv4(203,30,24,0),24 }, + { IPv4(203,30,50,0),24 }, + { IPv4(203,30,62,0),23 }, + { IPv4(203,30,68,0),24 }, + { IPv4(203,30,85,0),24 }, + { IPv4(203,30,88,0),24 }, + { IPv4(203,30,96,0),24 }, + { IPv4(203,30,98,0),23 }, + { IPv4(203,30,105,0),24 }, + { IPv4(203,30,130,0),24 }, + { IPv4(203,30,140,0),24 }, + { IPv4(203,30,141,0),24 }, + { IPv4(203,30,142,0),24 }, + { IPv4(203,30,158,0),24 }, + { IPv4(203,30,171,0),24 }, + { IPv4(203,30,175,0),24 }, + { IPv4(203,30,184,0),23 }, + { IPv4(203,30,194,0),24 }, + { IPv4(203,30,202,0),24 }, + { IPv4(203,30,208,0),24 }, + { IPv4(203,30,210,0),24 }, + { IPv4(203,30,213,0),24 }, + { IPv4(203,30,216,0),24 }, + { IPv4(203,30,228,0),23 }, + { IPv4(203,30,230,0),24 }, + { IPv4(203,30,236,0),23 }, + { IPv4(203,30,247,0),24 }, + { IPv4(203,30,248,0),24 }, + { IPv4(203,30,254,0),24 }, + { IPv4(203,30,255,0),24 }, + { IPv4(203,31,8,0),24 }, + { IPv4(203,31,9,0),24 }, + { IPv4(203,31,22,0),24 }, + { IPv4(203,31,23,0),24 }, + { IPv4(203,31,32,0),22 }, + { IPv4(203,31,44,0),24 }, + { IPv4(203,31,48,0),24 }, + { IPv4(203,31,59,0),24 }, + { IPv4(203,31,66,0),24 }, + { IPv4(203,31,71,0),24 }, + { IPv4(203,31,79,0),24 }, + { IPv4(203,31,86,0),24 }, + { IPv4(203,31,93,0),24 }, + { IPv4(203,31,96,0),24 }, + { IPv4(203,31,121,0),24 }, + { IPv4(203,31,122,0),24 }, + { IPv4(203,31,123,0),24 }, + { IPv4(203,31,164,0),24 }, + { IPv4(203,31,165,0),24 }, + { IPv4(203,31,169,0),24 }, + { IPv4(203,31,173,0),24 }, + { IPv4(203,31,184,0),24 }, + { IPv4(203,31,194,0),24 }, + { IPv4(203,31,240,0),24 }, + { IPv4(203,32,0,0),23 }, + { IPv4(203,32,2,0),24 }, + { IPv4(203,32,3,0),24 }, + { IPv4(203,32,57,0),24 }, + { IPv4(203,32,65,0),24 }, + { IPv4(203,32,72,0),23 }, + { IPv4(203,32,74,0),24 }, + { IPv4(203,32,89,0),24 }, + { IPv4(203,32,94,0),24 }, + { IPv4(203,32,103,0),24 }, + { IPv4(203,32,111,0),24 }, + { IPv4(203,32,135,0),24 }, + { IPv4(203,32,143,0),24 }, + { IPv4(203,32,158,0),24 }, + { IPv4(203,32,176,0),24 }, + { IPv4(203,32,194,0),24 }, + { IPv4(203,32,202,0),24 }, + { IPv4(203,32,208,0),22 }, + { IPv4(203,32,224,0),19 }, + { IPv4(203,33,2,0),24 }, + { IPv4(203,33,6,0),24 }, + { IPv4(203,33,31,0),24 }, + { IPv4(203,33,39,0),24 }, + { IPv4(203,33,77,0),24 }, + { IPv4(203,33,96,0),22 }, + { IPv4(203,33,105,0),24 }, + { IPv4(203,33,133,0),24 }, + { IPv4(203,33,134,0),24 }, + { IPv4(203,33,136,0),22 }, + { IPv4(203,33,168,0),24 }, + { IPv4(203,33,171,0),24 }, + { IPv4(203,33,178,0),24 }, + { IPv4(203,33,182,0),24 }, + { IPv4(203,33,188,0),24 }, + { IPv4(203,33,191,0),24 }, + { IPv4(203,33,237,0),24 }, + { IPv4(203,33,240,0),24 }, + { IPv4(203,33,251,0),24 }, + { 
IPv4(203,34,24,0),24 }, + { IPv4(203,34,36,0),24 }, + { IPv4(203,34,37,0),24 }, + { IPv4(203,34,40,0),24 }, + { IPv4(203,34,61,0),24 }, + { IPv4(203,34,62,0),24 }, + { IPv4(203,34,69,0),24 }, + { IPv4(203,34,110,0),24 }, + { IPv4(203,34,126,0),23 }, + { IPv4(203,34,137,0),24 }, + { IPv4(203,34,140,0),24 }, + { IPv4(203,34,151,0),24 }, + { IPv4(203,34,156,0),22 }, + { IPv4(203,34,180,0),24 }, + { IPv4(203,34,184,0),24 }, + { IPv4(203,34,202,0),24 }, + { IPv4(203,34,218,0),24 }, + { IPv4(203,35,128,0),23 }, + { IPv4(203,37,38,0),24 }, + { IPv4(203,37,165,0),24 }, + { IPv4(203,37,169,0),24 }, + { IPv4(203,37,185,0),24 }, + { IPv4(203,38,138,0),24 }, + { IPv4(203,38,140,0),22 }, + { IPv4(203,44,170,0),23 }, + { IPv4(203,52,18,0),24 }, + { IPv4(203,55,6,0),23 }, + { IPv4(203,55,20,0),24 }, + { IPv4(203,55,23,0),24 }, + { IPv4(203,55,33,0),24 }, + { IPv4(203,55,46,0),23 }, + { IPv4(203,55,48,0),23 }, + { IPv4(203,55,50,0),24 }, + { IPv4(203,55,51,0),24 }, + { IPv4(203,55,54,0),24 }, + { IPv4(203,55,54,0),23 }, + { IPv4(203,55,55,0),24 }, + { IPv4(203,55,65,0),24 }, + { IPv4(203,55,69,0),24 }, + { IPv4(203,55,83,0),24 }, + { IPv4(203,55,102,0),24 }, + { IPv4(203,55,102,0),23 }, + { IPv4(203,55,103,0),24 }, + { IPv4(203,55,105,0),24 }, + { IPv4(203,55,123,0),24 }, + { IPv4(203,55,138,0),24 }, + { IPv4(203,55,142,0),24 }, + { IPv4(203,55,144,0),24 }, + { IPv4(203,55,145,0),24 }, + { IPv4(203,55,158,0),24 }, + { IPv4(203,55,160,0),24 }, + { IPv4(203,55,161,0),24 }, + { IPv4(203,55,176,0),24 }, + { IPv4(203,55,191,0),24 }, + { IPv4(203,55,204,0),23 }, + { IPv4(203,55,215,0),24 }, + { IPv4(203,55,226,0),24 }, + { IPv4(203,55,227,0),24 }, + { IPv4(203,55,250,0),24 }, + { IPv4(203,56,0,0),24 }, + { IPv4(203,56,2,0),24 }, + { IPv4(203,56,3,0),24 }, + { IPv4(203,56,8,0),24 }, + { IPv4(203,56,18,0),24 }, + { IPv4(203,56,20,0),24 }, + { IPv4(203,56,34,0),24 }, + { IPv4(203,56,37,0),24 }, + { IPv4(203,56,88,0),24 }, + { IPv4(203,56,89,0),24 }, + { IPv4(203,56,94,0),24 }, + { IPv4(203,56,98,0),24 }, + { IPv4(203,56,116,0),23 }, + { IPv4(203,56,136,0),23 }, + { IPv4(203,56,186,0),24 }, + { IPv4(203,56,208,0),22 }, + { IPv4(203,56,213,0),24 }, + { IPv4(203,56,218,0),24 }, + { IPv4(203,56,234,0),24 }, + { IPv4(203,56,241,0),24 }, + { IPv4(203,56,244,0),24 }, + { IPv4(203,56,246,0),24 }, + { IPv4(203,56,247,0),24 }, + { IPv4(203,56,248,0),24 }, + { IPv4(203,56,253,0),24 }, + { IPv4(203,57,10,0),24 }, + { IPv4(203,57,20,0),24 }, + { IPv4(203,57,25,0),24 }, + { IPv4(203,57,36,0),23 }, + { IPv4(203,57,48,0),24 }, + { IPv4(203,57,49,0),24 }, + { IPv4(203,57,52,0),22 }, + { IPv4(203,57,56,0),23 }, + { IPv4(203,57,75,0),24 }, + { IPv4(203,57,91,0),24 }, + { IPv4(203,57,92,0),24 }, + { IPv4(203,57,110,0),24 }, + { IPv4(203,57,118,0),23 }, + { IPv4(203,57,121,0),24 }, + { IPv4(203,57,147,0),24 }, + { IPv4(203,57,149,0),24 }, + { IPv4(203,57,192,0),23 }, + { IPv4(203,57,192,0),24 }, + { IPv4(203,57,194,0),24 }, + { IPv4(203,57,204,0),24 }, + { IPv4(203,57,222,0),24 }, + { IPv4(203,57,252,0),22 }, + { IPv4(203,58,0,0),24 }, + { IPv4(203,58,9,0),24 }, + { IPv4(203,58,13,0),24 }, + { IPv4(203,58,17,0),24 }, + { IPv4(203,58,18,0),24 }, + { IPv4(203,58,21,0),24 }, + { IPv4(203,58,22,0),24 }, + { IPv4(203,58,24,0),24 }, + { IPv4(203,58,31,0),24 }, + { IPv4(203,58,58,0),24 }, + { IPv4(203,58,59,0),24 }, + { IPv4(203,58,117,0),24 }, + { IPv4(203,58,118,0),24 }, + { IPv4(203,58,119,0),24 }, + { IPv4(203,58,134,0),24 }, + { IPv4(203,60,19,0),24 }, + { IPv4(203,62,130,0),24 }, + { IPv4(203,62,136,0),23 }, + { IPv4(203,62,144,0),24 
}, + { IPv4(203,62,148,0),22 }, + { IPv4(203,62,150,0),24 }, + { IPv4(203,62,170,0),24 }, + { IPv4(203,62,190,0),24 }, + { IPv4(203,63,0,0),16 }, + { IPv4(203,63,99,0),24 }, + { IPv4(203,64,0,0),16 }, + { IPv4(203,65,0,0),17 }, + { IPv4(203,65,128,0),19 }, + { IPv4(203,65,192,0),19 }, + { IPv4(203,65,224,0),21 }, + { IPv4(203,65,232,0),22 }, + { IPv4(203,65,240,0),22 }, + { IPv4(203,65,244,0),22 }, + { IPv4(203,65,248,0),21 }, + { IPv4(203,67,0,0),16 }, + { IPv4(203,67,175,0),24 }, + { IPv4(203,68,0,0),16 }, + { IPv4(203,70,0,0),16 }, + { IPv4(203,70,62,0),24 }, + { IPv4(203,71,0,0),16 }, + { IPv4(203,72,0,0),19 }, + { IPv4(203,72,32,0),22 }, + { IPv4(203,72,36,0),23 }, + { IPv4(203,72,38,0),23 }, + { IPv4(203,72,40,0),21 }, + { IPv4(203,72,48,0),20 }, + { IPv4(203,72,64,0),18 }, + { IPv4(203,72,128,0),17 }, + { IPv4(203,73,0,0),16 }, + { IPv4(203,73,64,0),18 }, + { IPv4(203,73,192,0),18 }, + { IPv4(203,73,250,0),24 }, + { IPv4(203,77,224,0),21 }, + { IPv4(203,77,232,0),21 }, + { IPv4(203,77,241,0),24 }, + { IPv4(203,77,254,0),24 }, + { IPv4(203,77,255,0),24 }, + { IPv4(203,78,128,0),24 }, + { IPv4(203,78,130,0),24 }, + { IPv4(203,80,64,0),24 }, + { IPv4(203,80,66,0),23 }, + { IPv4(203,84,63,0),24 }, + { IPv4(203,86,96,0),19 }, + { IPv4(203,86,156,0),24 }, + { IPv4(203,87,0,0),20 }, + { IPv4(203,87,16,0),23 }, + { IPv4(203,87,18,0),24 }, + { IPv4(203,87,19,0),24 }, + { IPv4(203,87,20,0),24 }, + { IPv4(203,87,21,0),24 }, + { IPv4(203,87,22,0),23 }, + { IPv4(203,87,25,0),24 }, + { IPv4(203,87,26,0),24 }, + { IPv4(203,87,27,0),24 }, + { IPv4(203,87,32,0),20 }, + { IPv4(203,87,48,0),23 }, + { IPv4(203,87,51,0),24 }, + { IPv4(203,87,53,0),24 }, + { IPv4(203,87,57,0),24 }, + { IPv4(203,87,58,0),23 }, + { IPv4(203,87,60,0),24 }, + { IPv4(203,87,61,0),24 }, + { IPv4(203,87,62,0),24 }, + { IPv4(203,87,63,0),24 }, + { IPv4(203,87,64,0),24 }, + { IPv4(203,87,65,0),24 }, + { IPv4(203,87,66,0),23 }, + { IPv4(203,87,69,0),24 }, + { IPv4(203,87,70,0),24 }, + { IPv4(203,87,71,0),24 }, + { IPv4(203,87,73,0),24 }, + { IPv4(203,87,74,0),24 }, + { IPv4(203,87,75,0),24 }, + { IPv4(203,87,76,0),24 }, + { IPv4(203,87,77,0),24 }, + { IPv4(203,87,78,0),24 }, + { IPv4(203,87,79,0),24 }, + { IPv4(203,87,80,0),23 }, + { IPv4(203,87,82,0),23 }, + { IPv4(203,87,84,0),22 }, + { IPv4(203,87,88,0),23 }, + { IPv4(203,87,90,0),23 }, + { IPv4(203,87,92,0),22 }, + { IPv4(203,87,128,0),20 }, + { IPv4(203,87,132,0),24 }, + { IPv4(203,88,0,0),22 }, + { IPv4(203,88,133,0),24 }, + { IPv4(203,88,134,0),24 }, + { IPv4(203,88,135,0),24 }, + { IPv4(203,88,136,0),24 }, + { IPv4(203,88,137,0),24 }, + { IPv4(203,88,141,0),24 }, + { IPv4(203,88,142,0),24 }, + { IPv4(203,88,143,0),24 }, + { IPv4(203,88,144,0),24 }, + { IPv4(203,88,145,0),24 }, + { IPv4(203,88,146,0),24 }, + { IPv4(203,88,147,0),24 }, + { IPv4(203,89,64,0),19 }, + { IPv4(203,90,0,0),22 }, + { IPv4(203,90,192,0),20 }, + { IPv4(203,91,224,0),19 }, + { IPv4(203,91,224,0),24 }, + { IPv4(203,91,226,0),24 }, + { IPv4(203,91,233,0),24 }, + { IPv4(203,91,235,0),24 }, + { IPv4(203,91,236,0),24 }, + { IPv4(203,91,237,0),24 }, + { IPv4(203,91,249,0),24 }, + { IPv4(203,91,250,0),24 }, + { IPv4(203,92,64,0),19 }, + { IPv4(203,92,128,0),19 }, + { IPv4(203,93,248,0),21 }, + { IPv4(203,94,64,0),18 }, + { IPv4(203,95,128,0),18 }, + { IPv4(203,95,192,0),18 }, + { IPv4(203,96,16,0),20 }, + { IPv4(203,96,48,0),20 }, + { IPv4(203,96,96,0),19 }, + { IPv4(203,96,120,0),23 }, + { IPv4(203,96,128,0),20 }, + { IPv4(203,97,0,0),17 }, + { IPv4(203,98,64,0),19 }, + { IPv4(203,98,64,0),20 }, + { 
IPv4(203,98,80,0),20 }, + { IPv4(203,98,95,0),24 }, + { IPv4(203,99,65,0),24 }, + { IPv4(203,99,66,0),24 }, + { IPv4(203,99,71,0),24 }, + { IPv4(203,100,224,0),19 }, + { IPv4(203,100,232,0),24 }, + { IPv4(203,100,238,0),23 }, + { IPv4(203,100,247,0),24 }, + { IPv4(203,101,128,0),19 }, + { IPv4(203,101,152,0),21 }, + { IPv4(203,105,128,0),21 }, + { IPv4(203,105,136,0),22 }, + { IPv4(203,105,140,0),23 }, + { IPv4(203,105,142,0),23 }, + { IPv4(203,105,144,0),24 }, + { IPv4(203,106,0,0),16 }, + { IPv4(203,107,0,0),18 }, + { IPv4(203,107,128,0),17 }, + { IPv4(203,107,128,0),22 }, + { IPv4(203,107,144,0),20 }, + { IPv4(203,109,140,0),24 }, + { IPv4(203,109,141,0),24 }, + { IPv4(203,109,160,0),24 }, + { IPv4(203,109,161,0),24 }, + { IPv4(203,109,172,0),22 }, + { IPv4(203,109,202,0),24 }, + { IPv4(203,109,206,0),24 }, + { IPv4(203,109,210,0),24 }, + { IPv4(203,109,212,0),22 }, + { IPv4(203,109,217,0),24 }, + { IPv4(203,109,220,0),24 }, + { IPv4(203,109,224,0),22 }, + { IPv4(203,109,240,0),21 }, + { IPv4(203,109,248,0),22 }, + { IPv4(203,110,159,0),24 }, + { IPv4(203,111,192,0),20 }, + { IPv4(203,112,9,0),24 }, + { IPv4(203,112,224,0),19 }, + { IPv4(203,113,0,0),19 }, + { IPv4(203,114,224,0),23 }, + { IPv4(203,114,226,0),23 }, + { IPv4(203,114,228,0),23 }, + { IPv4(203,114,230,0),23 }, + { IPv4(203,114,232,0),23 }, + { IPv4(203,114,234,0),23 }, + { IPv4(203,114,236,0),23 }, + { IPv4(203,114,238,0),23 }, + { IPv4(203,114,240,0),23 }, + { IPv4(203,114,242,0),23 }, + { IPv4(203,114,244,0),23 }, + { IPv4(203,114,246,0),23 }, + { IPv4(203,114,248,0),23 }, + { IPv4(203,114,250,0),23 }, + { IPv4(203,114,252,0),23 }, + { IPv4(203,114,254,0),23 }, + { IPv4(203,115,0,0),18 }, + { IPv4(203,115,96,0),22 }, + { IPv4(203,115,100,0),23 }, + { IPv4(203,115,116,0),23 }, + { IPv4(203,115,118,0),23 }, + { IPv4(203,115,125,0),24 }, + { IPv4(203,115,126,0),24 }, + { IPv4(203,116,0,0),16 }, + { IPv4(203,116,23,0),24 }, + { IPv4(203,116,61,0),24 }, + { IPv4(203,116,81,0),24 }, + { IPv4(203,116,255,0),24 }, + { IPv4(203,117,0,0),16 }, + { IPv4(203,117,32,0),19 }, + { IPv4(203,118,0,0),18 }, + { IPv4(203,121,0,0),19 }, + { IPv4(203,121,0,0),18 }, + { IPv4(203,121,32,0),19 }, + { IPv4(203,121,64,0),20 }, + { IPv4(203,121,64,0),19 }, + { IPv4(203,121,80,0),20 }, + { IPv4(203,121,96,0),19 }, + { IPv4(203,121,96,0),20 }, + { IPv4(203,121,112,0),20 }, + { IPv4(203,121,128,0),20 }, + { IPv4(203,121,128,0),24 }, + { IPv4(203,121,128,0),19 }, + { IPv4(203,121,130,0),24 }, + { IPv4(203,121,131,0),24 }, + { IPv4(203,121,138,0),24 }, + { IPv4(203,121,144,0),20 }, + { IPv4(203,122,0,0),18 }, + { IPv4(203,123,0,0),19 }, + { IPv4(203,123,95,0),24 }, + { IPv4(203,123,224,0),19 }, + { IPv4(203,124,96,0),19 }, + { IPv4(203,124,128,0),20 }, + { IPv4(203,124,130,0),23 }, + { IPv4(203,124,131,0),24 }, + { IPv4(203,124,132,0),23 }, + { IPv4(203,124,133,0),24 }, + { IPv4(203,124,134,0),23 }, + { IPv4(203,124,136,0),22 }, + { IPv4(203,124,140,0),23 }, + { IPv4(203,124,237,0),24 }, + { IPv4(203,124,248,0),24 }, + { IPv4(203,124,249,0),24 }, + { IPv4(203,125,128,0),17 }, + { IPv4(203,126,0,0),16 }, + { IPv4(203,126,77,0),24 }, + { IPv4(203,127,25,0),24 }, + { IPv4(203,127,100,0),23 }, + { IPv4(203,127,108,0),24 }, + { IPv4(203,127,132,0),24 }, + { IPv4(203,127,225,0),24 }, + { IPv4(203,129,194,0),24 }, + { IPv4(203,129,195,0),24 }, + { IPv4(203,129,202,0),24 }, + { IPv4(203,129,204,0),24 }, + { IPv4(203,129,205,0),24 }, + { IPv4(203,129,207,0),24 }, + { IPv4(203,129,216,0),24 }, + { IPv4(203,129,220,0),24 }, + { IPv4(203,129,221,0),24 }, + { 
IPv4(203,129,222,0),23 }, + { IPv4(203,129,222,0),24 }, + { IPv4(203,129,223,0),24 }, + { IPv4(203,129,244,0),22 }, + { IPv4(203,129,252,0),22 }, + { IPv4(203,129,254,0),23 }, + { IPv4(203,130,128,0),19 }, + { IPv4(203,132,100,0),24 }, + { IPv4(203,132,107,0),24 }, + { IPv4(203,132,108,0),24 }, + { IPv4(203,132,111,0),24 }, + { IPv4(203,132,125,0),24 }, + { IPv4(203,132,224,0),19 }, + { IPv4(203,133,0,0),17 }, + { IPv4(203,134,0,0),20 }, + { IPv4(203,134,2,0),24 }, + { IPv4(203,134,12,0),23 }, + { IPv4(203,134,13,0),24 }, + { IPv4(203,134,18,0),23 }, + { IPv4(203,134,32,0),21 }, + { IPv4(203,134,56,0),22 }, + { IPv4(203,134,64,0),19 }, + { IPv4(203,134,96,0),20 }, + { IPv4(203,134,116,0),22 }, + { IPv4(203,134,117,0),24 }, + { IPv4(203,134,144,0),20 }, + { IPv4(203,134,148,0),22 }, + { IPv4(203,134,160,0),21 }, + { IPv4(203,134,168,0),21 }, + { IPv4(203,134,176,0),22 }, + { IPv4(203,134,184,0),21 }, + { IPv4(203,135,0,0),24 }, + { IPv4(203,135,7,0),24 }, + { IPv4(203,135,10,0),24 }, + { IPv4(203,135,11,0),24 }, + { IPv4(203,135,12,0),24 }, + { IPv4(203,135,13,0),24 }, + { IPv4(203,135,16,0),24 }, + { IPv4(203,135,17,0),24 }, + { IPv4(203,135,18,0),24 }, + { IPv4(203,135,19,0),24 }, + { IPv4(203,135,20,0),24 }, + { IPv4(203,135,21,0),24 }, + { IPv4(203,135,22,0),24 }, + { IPv4(203,135,23,0),24 }, + { IPv4(203,135,28,0),24 }, + { IPv4(203,135,30,0),24 }, + { IPv4(203,135,33,0),24 }, + { IPv4(203,135,34,0),24 }, + { IPv4(203,135,35,0),24 }, + { IPv4(203,135,36,0),24 }, + { IPv4(203,135,37,0),24 }, + { IPv4(203,135,38,0),24 }, + { IPv4(203,135,40,0),24 }, + { IPv4(203,135,43,0),24 }, + { IPv4(203,135,50,0),24 }, + { IPv4(203,135,60,0),24 }, + { IPv4(203,135,99,0),24 }, + { IPv4(203,136,0,0),16 }, + { IPv4(203,139,0,0),17 }, + { IPv4(203,139,192,0),19 }, + { IPv4(203,139,224,0),19 }, + { IPv4(203,140,32,0),19 }, + { IPv4(203,140,128,0),19 }, + { IPv4(203,140,154,0),24 }, + { IPv4(203,140,176,0),20 }, + { IPv4(203,141,64,0),19 }, + { IPv4(203,141,128,0),19 }, + { IPv4(203,141,160,0),19 }, + { IPv4(203,141,192,0),19 }, + { IPv4(203,141,224,0),20 }, + { IPv4(203,142,128,0),19 }, + { IPv4(203,142,244,0),24 }, + { IPv4(203,142,245,0),24 }, + { IPv4(203,143,128,0),23 }, + { IPv4(203,143,130,0),24 }, + { IPv4(203,143,131,0),24 }, + { IPv4(203,143,224,0),19 }, + { IPv4(203,144,32,0),20 }, + { IPv4(203,144,128,0),20 }, + { IPv4(203,144,144,0),20 }, + { IPv4(203,144,160,0),20 }, + { IPv4(203,144,176,0),20 }, + { IPv4(203,144,192,0),22 }, + { IPv4(203,144,196,0),22 }, + { IPv4(203,144,200,0),22 }, + { IPv4(203,144,204,0),22 }, + { IPv4(203,144,208,0),22 }, + { IPv4(203,144,212,0),22 }, + { IPv4(203,144,216,0),22 }, + { IPv4(203,144,220,0),22 }, + { IPv4(203,144,224,0),22 }, + { IPv4(203,144,228,0),22 }, + { IPv4(203,144,232,0),22 }, + { IPv4(203,144,233,0),24 }, + { IPv4(203,144,236,0),22 }, + { IPv4(203,144,240,0),22 }, + { IPv4(203,144,240,0),24 }, + { IPv4(203,144,244,0),22 }, + { IPv4(203,144,248,0),22 }, + { IPv4(203,144,252,0),22 }, + { IPv4(203,145,128,0),22 }, + { IPv4(203,145,133,0),24 }, + { IPv4(203,145,134,0),23 }, + { IPv4(203,145,135,0),24 }, + { IPv4(203,145,136,0),22 }, + { IPv4(203,145,140,0),22 }, + { IPv4(203,145,144,0),22 }, + { IPv4(203,145,147,0),24 }, + { IPv4(203,145,148,0),22 }, + { IPv4(203,145,152,0),23 }, + { IPv4(203,145,154,0),24 }, + { IPv4(203,145,156,0),24 }, + { IPv4(203,145,157,0),24 }, + { IPv4(203,145,159,0),24 }, + { IPv4(203,145,224,0),23 }, + { IPv4(203,146,18,0),24 }, + { IPv4(203,146,205,0),24 }, + { IPv4(203,146,242,0),23 }, + { IPv4(203,147,0,0),18 }, + { 
IPv4(203,147,60,0),24 }, + { IPv4(203,148,128,0),20 }, + { IPv4(203,148,141,0),24 }, + { IPv4(203,148,144,0),20 }, + { IPv4(203,148,144,0),21 }, + { IPv4(203,148,160,0),19 }, + { IPv4(203,148,161,0),24 }, + { IPv4(203,148,162,0),24 }, + { IPv4(203,148,192,0),22 }, + { IPv4(203,148,196,0),22 }, + { IPv4(203,148,200,0),21 }, + { IPv4(203,148,208,0),20 }, + { IPv4(203,148,224,0),19 }, + { IPv4(203,149,0,0),19 }, + { IPv4(203,149,32,0),19 }, + { IPv4(203,149,52,0),24 }, + { IPv4(203,149,128,0),17 }, + { IPv4(203,150,121,0),24 }, + { IPv4(203,151,240,0),20 }, + { IPv4(203,152,0,0),18 }, + { IPv4(203,152,0,0),22 }, + { IPv4(203,152,4,0),22 }, + { IPv4(203,152,8,0),22 }, + { IPv4(203,152,12,0),22 }, + { IPv4(203,152,16,0),22 }, + { IPv4(203,152,20,0),22 }, + { IPv4(203,152,28,0),22 }, + { IPv4(203,152,32,0),22 }, + { IPv4(203,152,36,0),22 }, + { IPv4(203,152,40,0),22 }, + { IPv4(203,152,44,0),22 }, + { IPv4(203,152,48,0),22 }, + { IPv4(203,152,52,0),22 }, + { IPv4(203,152,56,0),22 }, + { IPv4(203,152,60,0),22 }, + { IPv4(203,153,128,0),20 }, + { IPv4(203,154,0,0),16 }, + { IPv4(203,154,221,0),24 }, + { IPv4(203,154,222,0),24 }, + { IPv4(203,155,0,0),16 }, + { IPv4(203,157,0,0),16 }, + { IPv4(203,158,0,0),22 }, + { IPv4(203,158,6,0),23 }, + { IPv4(203,159,0,0),18 }, + { IPv4(203,159,64,0),19 }, + { IPv4(203,160,224,0),21 }, + { IPv4(203,160,232,0),22 }, + { IPv4(203,160,236,0),22 }, + { IPv4(203,160,240,0),20 }, + { IPv4(203,161,32,0),20 }, + { IPv4(203,161,39,0),24 }, + { IPv4(203,161,128,0),21 }, + { IPv4(203,162,0,0),20 }, + { IPv4(203,162,16,0),20 }, + { IPv4(203,163,61,0),24 }, + { IPv4(203,163,64,0),18 }, + { IPv4(203,164,0,0),16 }, + { IPv4(203,166,45,0),24 }, + { IPv4(203,167,1,0),24 }, + { IPv4(203,167,2,0),24 }, + { IPv4(203,167,9,0),24 }, + { IPv4(203,167,10,0),24 }, + { IPv4(203,167,26,0),24 }, + { IPv4(203,167,64,0),19 }, + { IPv4(203,167,70,0),24 }, + { IPv4(203,167,71,0),24 }, + { IPv4(203,167,72,0),24 }, + { IPv4(203,167,73,0),24 }, + { IPv4(203,167,74,0),24 }, + { IPv4(203,167,96,0),19 }, + { IPv4(203,167,105,0),24 }, + { IPv4(203,167,106,0),24 }, + { IPv4(203,167,107,0),24 }, + { IPv4(203,167,111,0),24 }, + { IPv4(203,167,112,0),24 }, + { IPv4(203,167,128,0),17 }, + { IPv4(203,168,0,0),22 }, + { IPv4(203,168,0,0),24 }, + { IPv4(203,168,1,0),24 }, + { IPv4(203,168,2,0),24 }, + { IPv4(203,168,3,0),24 }, + { IPv4(203,168,4,0),22 }, + { IPv4(203,168,8,0),22 }, + { IPv4(203,168,12,0),22 }, + { IPv4(203,168,16,0),20 }, + { IPv4(203,168,20,0),24 }, + { IPv4(203,168,21,0),24 }, + { IPv4(203,168,22,0),24 }, + { IPv4(203,168,23,0),24 }, + { IPv4(203,168,74,0),24 }, + { IPv4(203,168,75,0),24 }, + { IPv4(203,168,77,0),24 }, + { IPv4(203,168,78,0),24 }, + { IPv4(203,168,144,0),20 }, + { IPv4(203,168,192,0),21 }, + { IPv4(203,168,200,0),21 }, + { IPv4(203,168,224,0),19 }, + { IPv4(203,169,90,0),24 }, + { IPv4(203,170,1,0),24 }, + { IPv4(203,170,2,0),24 }, + { IPv4(203,170,3,0),24 }, + { IPv4(203,170,4,0),24 }, + { IPv4(203,170,5,0),24 }, + { IPv4(203,170,6,0),24 }, + { IPv4(203,170,7,0),24 }, + { IPv4(203,170,8,0),24 }, + { IPv4(203,170,9,0),24 }, + { IPv4(203,170,10,0),24 }, + { IPv4(203,170,11,0),24 }, + { IPv4(203,170,12,0),24 }, + { IPv4(203,170,13,0),24 }, + { IPv4(203,170,14,0),24 }, + { IPv4(203,170,15,0),24 }, + { IPv4(203,170,128,0),18 }, + { IPv4(203,170,160,0),19 }, + { IPv4(203,170,176,0),24 }, + { IPv4(203,170,177,0),24 }, + { IPv4(203,170,178,0),24 }, + { IPv4(203,170,179,0),24 }, + { IPv4(203,170,190,0),24 }, + { IPv4(203,170,191,0),24 }, + { IPv4(203,170,192,0),18 }, + { 
IPv4(203,170,224,0),19 }, + { IPv4(203,172,0,0),19 }, + { IPv4(203,172,24,0),24 }, + { IPv4(203,173,128,0),19 }, + { IPv4(203,173,252,0),24 }, + { IPv4(203,175,0,0),24 }, + { IPv4(203,175,1,0),24 }, + { IPv4(203,175,2,0),24 }, + { IPv4(203,176,0,0),19 }, + { IPv4(203,176,5,0),24 }, + { IPv4(203,176,6,0),24 }, + { IPv4(203,176,8,0),24 }, + { IPv4(203,176,23,0),24 }, + { IPv4(203,176,24,0),22 }, + { IPv4(203,176,32,0),19 }, + { IPv4(203,176,35,0),24 }, + { IPv4(203,176,44,0),24 }, + { IPv4(203,176,46,0),24 }, + { IPv4(203,176,47,0),24 }, + { IPv4(203,176,56,0),22 }, + { IPv4(203,176,64,0),19 }, + { IPv4(203,176,65,0),24 }, + { IPv4(203,176,75,0),24 }, + { IPv4(203,176,92,0),22 }, + { IPv4(203,177,3,0),24 }, + { IPv4(203,177,32,0),19 }, + { IPv4(203,177,64,0),20 }, + { IPv4(203,177,252,0),24 }, + { IPv4(203,177,253,0),24 }, + { IPv4(203,177,254,0),24 }, + { IPv4(203,178,32,0),24 }, + { IPv4(203,178,33,0),24 }, + { IPv4(203,178,36,0),22 }, + { IPv4(203,178,64,0),18 }, + { IPv4(203,178,128,0),19 }, + { IPv4(203,179,192,0),19 }, + { IPv4(203,181,96,0),19 }, + { IPv4(203,181,192,0),18 }, + { IPv4(203,185,64,0),18 }, + { IPv4(203,185,129,0),24 }, + { IPv4(203,185,140,0),22 }, + { IPv4(203,185,192,0),19 }, + { IPv4(203,185,224,0),19 }, + { IPv4(203,185,238,0),24 }, + { IPv4(203,186,38,0),23 }, + { IPv4(203,186,66,0),23 }, + { IPv4(203,186,94,0),24 }, + { IPv4(203,186,95,0),24 }, + { IPv4(203,186,192,0),18 }, + { IPv4(203,187,0,0),17 }, + { IPv4(203,188,128,0),21 }, + { IPv4(203,189,254,0),24 }, + { IPv4(203,190,0,0),24 }, + { IPv4(203,190,0,0),22 }, + { IPv4(203,190,1,0),24 }, + { IPv4(203,190,2,0),24 }, + { IPv4(203,190,3,0),24 }, + { IPv4(203,190,254,0),23 }, + { IPv4(203,194,128,0),20 }, + { IPv4(203,194,176,0),21 }, + { IPv4(203,194,184,0),22 }, + { IPv4(203,194,216,0),21 }, + { IPv4(203,195,0,0),18 }, + { IPv4(203,195,0,0),19 }, + { IPv4(203,195,32,0),19 }, + { IPv4(203,195,128,0),23 }, + { IPv4(203,195,129,0),24 }, + { IPv4(203,195,130,0),23 }, + { IPv4(203,195,132,0),22 }, + { IPv4(203,195,136,0),21 }, + { IPv4(203,195,144,0),23 }, + { IPv4(203,195,146,0),23 }, + { IPv4(203,195,148,0),22 }, + { IPv4(203,195,150,0),24 }, + { IPv4(203,195,156,0),22 }, + { IPv4(203,195,160,0),21 }, + { IPv4(203,195,164,0),24 }, + { IPv4(203,195,170,0),23 }, + { IPv4(203,195,172,0),24 }, + { IPv4(203,195,175,0),24 }, + { IPv4(203,195,180,0),22 }, + { IPv4(203,195,184,0),22 }, + { IPv4(203,195,192,0),22 }, + { IPv4(203,195,196,0),22 }, + { IPv4(203,195,200,0),22 }, + { IPv4(203,195,204,0),22 }, + { IPv4(203,195,222,0),24 }, + { IPv4(203,195,223,0),24 }, + { IPv4(203,196,0,0),22 }, + { IPv4(203,196,4,0),22 }, + { IPv4(203,196,4,0),24 }, + { IPv4(203,196,7,0),24 }, + { IPv4(203,196,64,0),21 }, + { IPv4(203,196,72,0),21 }, + { IPv4(203,196,128,0),21 }, + { IPv4(203,196,136,0),21 }, + { IPv4(203,196,142,0),23 }, + { IPv4(203,196,144,0),22 }, + { IPv4(203,196,148,0),23 }, + { IPv4(203,196,150,0),23 }, + { IPv4(203,203,0,0),16 }, + { IPv4(203,204,0,0),16 }, + { IPv4(203,204,0,0),17 }, + { IPv4(203,204,128,0),17 }, + { IPv4(203,207,0,0),20 }, + { IPv4(203,207,4,0),24 }, + { IPv4(203,207,5,0),24 }, + { IPv4(203,207,7,0),24 }, + { IPv4(203,208,0,0),20 }, + { IPv4(203,208,16,0),24 }, + { IPv4(203,208,128,0),17 }, + { IPv4(203,208,128,0),19 }, + { IPv4(203,208,129,0),24 }, + { IPv4(203,208,130,0),24 }, + { IPv4(203,208,131,0),24 }, + { IPv4(203,208,132,0),24 }, + { IPv4(203,208,134,0),24 }, + { IPv4(203,208,135,0),24 }, + { IPv4(203,208,136,0),24 }, + { IPv4(203,208,138,0),24 }, + { IPv4(203,208,139,0),24 }, + { 
IPv4(203,208,144,0),24 }, + { IPv4(203,208,170,0),24 }, + { IPv4(203,208,224,0),24 }, + { IPv4(203,208,255,0),24 }, + { IPv4(203,209,0,0),18 }, + { IPv4(203,212,64,0),24 }, + { IPv4(203,212,65,0),24 }, + { IPv4(203,213,0,0),24 }, + { IPv4(203,213,2,0),24 }, + { IPv4(203,213,48,0),24 }, + { IPv4(203,213,128,0),19 }, + { IPv4(203,224,0,0),16 }, + { IPv4(203,225,0,0),16 }, + { IPv4(203,226,0,0),18 }, + { IPv4(203,226,128,0),18 }, + { IPv4(203,226,192,0),18 }, + { IPv4(203,227,19,0),24 }, + { IPv4(203,227,164,0),22 }, + { IPv4(203,227,232,0),24 }, + { IPv4(203,228,0,0),17 }, + { IPv4(203,228,128,0),18 }, + { IPv4(203,228,128,0),17 }, + { IPv4(203,228,176,0),24 }, + { IPv4(203,228,177,0),24 }, + { IPv4(203,228,178,0),24 }, + { IPv4(203,228,192,0),18 }, + { IPv4(203,228,208,0),21 }, + { IPv4(203,228,216,0),21 }, + { IPv4(203,228,224,0),21 }, + { IPv4(203,229,0,0),17 }, + { IPv4(203,229,128,0),17 }, + { IPv4(203,229,147,0),24 }, + { IPv4(203,230,0,0),17 }, + { IPv4(203,230,4,0),23 }, + { IPv4(203,230,12,0),22 }, + { IPv4(203,230,64,0),21 }, + { IPv4(203,230,72,0),23 }, + { IPv4(203,230,74,0),24 }, + { IPv4(203,230,76,0),24 }, + { IPv4(203,230,76,0),22 }, + { IPv4(203,230,80,0),20 }, + { IPv4(203,230,96,0),21 }, + { IPv4(203,230,104,0),22 }, + { IPv4(203,230,128,0),17 }, + { IPv4(203,230,152,0),22 }, + { IPv4(203,230,160,0),19 }, + { IPv4(203,230,208,0),23 }, + { IPv4(203,230,236,0),22 }, + { IPv4(203,232,0,0),16 }, + { IPv4(203,232,126,0),23 }, + { IPv4(203,232,128,0),21 }, + { IPv4(203,232,136,0),22 }, + { IPv4(203,232,140,0),22 }, + { IPv4(203,232,161,0),24 }, + { IPv4(203,232,162,0),23 }, + { IPv4(203,232,172,0),22 }, + { IPv4(203,232,176,0),22 }, + { IPv4(203,232,180,0),23 }, + { IPv4(203,232,186,0),24 }, + { IPv4(203,232,224,0),20 }, + { IPv4(203,233,0,0),17 }, + { IPv4(203,233,54,0),24 }, + { IPv4(203,233,55,0),24 }, + { IPv4(203,233,56,0),24 }, + { IPv4(203,233,57,0),24 }, + { IPv4(203,233,82,0),24 }, + { IPv4(203,233,85,0),24 }, + { IPv4(203,233,128,0),21 }, + { IPv4(203,233,136,0),22 }, + { IPv4(203,233,144,0),20 }, + { IPv4(203,233,160,0),19 }, + { IPv4(203,233,192,0),19 }, + { IPv4(203,233,224,0),20 }, + { IPv4(203,234,0,0),16 }, + { IPv4(203,234,8,0),21 }, + { IPv4(203,234,48,0),20 }, + { IPv4(203,234,96,0),21 }, + { IPv4(203,234,104,0),22 }, + { IPv4(203,234,108,0),23 }, + { IPv4(203,234,110,0),24 }, + { IPv4(203,234,132,0),24 }, + { IPv4(203,234,163,0),24 }, + { IPv4(203,234,241,0),24 }, + { IPv4(203,235,8,0),24 }, + { IPv4(203,235,68,0),24 }, + { IPv4(203,235,84,0),23 }, + { IPv4(203,235,128,0),18 }, + { IPv4(203,235,192,0),21 }, + { IPv4(203,235,208,0),20 }, + { IPv4(203,235,224,0),19 }, + { IPv4(203,236,0,0),19 }, + { IPv4(203,236,0,0),17 }, + { IPv4(203,236,32,0),21 }, + { IPv4(203,236,40,0),22 }, + { IPv4(203,236,52,0),22 }, + { IPv4(203,236,56,0),22 }, + { IPv4(203,236,60,0),24 }, + { IPv4(203,236,62,0),23 }, + { IPv4(203,236,64,0),24 }, + { IPv4(203,236,65,0),24 }, + { IPv4(203,236,66,0),24 }, + { IPv4(203,236,67,0),24 }, + { IPv4(203,236,69,0),24 }, + { IPv4(203,236,70,0),24 }, + { IPv4(203,236,72,0),24 }, + { IPv4(203,236,73,0),24 }, + { IPv4(203,236,75,0),24 }, + { IPv4(203,236,76,0),24 }, + { IPv4(203,236,77,0),24 }, + { IPv4(203,236,78,0),24 }, + { IPv4(203,237,0,0),19 }, + { IPv4(203,237,32,0),19 }, + { IPv4(203,237,64,0),19 }, + { IPv4(203,237,96,0),19 }, + { IPv4(203,237,128,0),17 }, + { IPv4(203,237,204,0),22 }, + { IPv4(203,237,208,0),21 }, + { IPv4(203,238,0,0),24 }, + { IPv4(203,238,1,0),24 }, + { IPv4(203,238,7,0),24 }, + { IPv4(203,238,28,0),24 }, + { 
IPv4(203,238,37,0),24 }, + { IPv4(203,238,67,0),24 }, + { IPv4(203,238,72,0),21 }, + { IPv4(203,238,128,0),18 }, + { IPv4(203,238,192,0),19 }, + { IPv4(203,238,224,0),19 }, + { IPv4(203,239,34,0),24 }, + { IPv4(203,239,56,0),24 }, + { IPv4(203,239,57,0),24 }, + { IPv4(203,239,63,0),24 }, + { IPv4(203,239,192,0),24 }, + { IPv4(203,239,193,0),24 }, + { IPv4(203,239,194,0),23 }, + { IPv4(203,239,196,0),23 }, + { IPv4(203,239,198,0),24 }, + { IPv4(203,239,199,0),24 }, + { IPv4(203,239,200,0),21 }, + { IPv4(203,239,208,0),24 }, + { IPv4(203,239,209,0),24 }, + { IPv4(203,239,210,0),24 }, + { IPv4(203,239,211,0),24 }, + { IPv4(203,239,212,0),22 }, + { IPv4(203,239,216,0),24 }, + { IPv4(203,239,217,0),24 }, + { IPv4(203,239,218,0),23 }, + { IPv4(203,239,220,0),22 }, + { IPv4(203,239,224,0),19 }, + { IPv4(203,240,0,0),18 }, + { IPv4(203,240,64,0),23 }, + { IPv4(203,240,67,0),24 }, + { IPv4(203,240,68,0),24 }, + { IPv4(203,240,128,0),17 }, + { IPv4(203,240,128,0),18 }, + { IPv4(203,240,128,0),24 }, + { IPv4(203,240,192,0),18 }, + { IPv4(203,240,231,0),24 }, + { IPv4(203,241,0,0),19 }, + { IPv4(203,241,52,0),22 }, + { IPv4(203,241,56,0),23 }, + { IPv4(203,241,58,0),23 }, + { IPv4(203,241,60,0),23 }, + { IPv4(203,241,62,0),23 }, + { IPv4(203,241,64,0),23 }, + { IPv4(203,241,64,0),21 }, + { IPv4(203,241,66,0),23 }, + { IPv4(203,241,68,0),24 }, + { IPv4(203,241,69,0),24 }, + { IPv4(203,241,70,0),23 }, + { IPv4(203,241,72,0),21 }, + { IPv4(203,241,80,0),22 }, + { IPv4(203,241,84,0),22 }, + { IPv4(203,241,88,0),21 }, + { IPv4(203,241,96,0),20 }, + { IPv4(203,241,112,0),22 }, + { IPv4(203,241,116,0),22 }, + { IPv4(203,241,120,0),21 }, + { IPv4(203,241,120,0),22 }, + { IPv4(203,241,124,0),22 }, + { IPv4(203,241,128,0),22 }, + { IPv4(203,241,164,0),23 }, + { IPv4(203,241,167,0),24 }, + { IPv4(203,241,168,0),22 }, + { IPv4(203,241,168,0),24 }, + { IPv4(203,241,170,0),24 }, + { IPv4(203,241,172,0),22 }, + { IPv4(203,241,174,0),23 }, + { IPv4(203,241,176,0),20 }, + { IPv4(203,241,192,0),20 }, + { IPv4(203,241,208,0),23 }, + { IPv4(203,241,208,0),20 }, + { IPv4(203,241,212,0),23 }, + { IPv4(203,241,224,0),19 }, + { IPv4(203,242,32,0),20 }, + { IPv4(203,242,48,0),21 }, + { IPv4(203,242,56,0),22 }, + { IPv4(203,242,60,0),23 }, + { IPv4(203,242,62,0),24 }, + { IPv4(203,242,63,0),24 }, + { IPv4(203,242,64,0),19 }, + { IPv4(203,242,112,0),21 }, + { IPv4(203,242,120,0),21 }, + { IPv4(203,242,128,0),17 }, + { IPv4(203,243,0,0),19 }, + { IPv4(203,243,0,0),18 }, + { IPv4(203,243,32,0),19 }, + { IPv4(203,243,64,0),18 }, + { IPv4(203,243,152,0),22 }, + { IPv4(203,243,156,0),22 }, + { IPv4(203,243,160,0),19 }, + { IPv4(203,243,192,0),20 }, + { IPv4(203,243,208,0),21 }, + { IPv4(203,243,253,0),24 }, + { IPv4(203,244,0,0),19 }, + { IPv4(203,244,32,0),19 }, + { IPv4(203,244,64,0),19 }, + { IPv4(203,244,128,0),18 }, + { IPv4(203,245,0,0),18 }, + { IPv4(203,245,64,0),18 }, + { IPv4(203,245,128,0),17 }, + { IPv4(203,246,0,0),17 }, + { IPv4(203,246,64,0),21 }, + { IPv4(203,246,104,0),21 }, + { IPv4(203,246,118,0),24 }, + { IPv4(203,246,119,0),24 }, + { IPv4(203,246,128,0),19 }, + { IPv4(203,246,176,0),22 }, + { IPv4(203,246,180,0),22 }, + { IPv4(203,246,184,0),24 }, + { IPv4(203,246,186,0),24 }, + { IPv4(203,246,187,0),24 }, + { IPv4(203,246,188,0),24 }, + { IPv4(203,246,189,0),24 }, + { IPv4(203,247,0,0),19 }, + { IPv4(203,247,32,0),19 }, + { IPv4(203,247,64,0),18 }, + { IPv4(203,247,66,0),24 }, + { IPv4(203,247,80,0),24 }, + { IPv4(203,247,128,0),19 }, + { IPv4(203,247,160,0),19 }, + { IPv4(203,247,161,0),24 }, + { 
IPv4(203,247,162,0),24 }, + { IPv4(203,247,166,0),24 }, + { IPv4(203,247,168,0),23 }, + { IPv4(203,247,180,0),23 }, + { IPv4(203,247,192,0),20 }, + { IPv4(203,247,212,0),22 }, + { IPv4(203,247,216,0),21 }, + { IPv4(203,247,220,0),22 }, + { IPv4(203,247,224,0),19 }, + { IPv4(203,248,116,0),24 }, + { IPv4(203,248,117,0),24 }, + { IPv4(203,248,118,0),24 }, + { IPv4(203,248,128,0),17 }, + { IPv4(203,248,188,0),24 }, + { IPv4(203,249,0,0),19 }, + { IPv4(203,249,0,0),17 }, + { IPv4(203,249,11,0),24 }, + { IPv4(203,249,12,0),22 }, + { IPv4(203,249,16,0),24 }, + { IPv4(203,249,18,0),23 }, + { IPv4(203,249,35,0),24 }, + { IPv4(203,249,38,0),24 }, + { IPv4(203,249,42,0),23 }, + { IPv4(203,249,44,0),23 }, + { IPv4(203,249,47,0),24 }, + { IPv4(203,249,48,0),20 }, + { IPv4(203,249,64,0),19 }, + { IPv4(203,249,84,0),22 }, + { IPv4(203,249,88,0),22 }, + { IPv4(203,249,92,0),23 }, + { IPv4(203,249,94,0),24 }, + { IPv4(203,249,95,0),24 }, + { IPv4(203,249,96,0),20 }, + { IPv4(203,249,128,0),19 }, + { IPv4(203,249,160,0),20 }, + { IPv4(203,249,224,0),19 }, + { IPv4(203,250,0,0),19 }, + { IPv4(203,250,32,0),19 }, + { IPv4(203,250,64,0),19 }, + { IPv4(203,250,96,0),20 }, + { IPv4(203,250,112,0),21 }, + { IPv4(203,250,120,0),21 }, + { IPv4(203,250,128,0),20 }, + { IPv4(203,250,144,0),20 }, + { IPv4(203,250,160,0),20 }, + { IPv4(203,250,184,0),21 }, + { IPv4(203,250,188,0),22 }, + { IPv4(203,250,192,0),18 }, + { IPv4(203,251,0,0),17 }, + { IPv4(203,251,128,0),18 }, + { IPv4(203,251,192,0),19 }, + { IPv4(203,251,192,0),18 }, + { IPv4(203,251,224,0),19 }, + { IPv4(203,251,226,0),23 }, + { IPv4(203,251,228,0),23 }, + { IPv4(203,251,230,0),24 }, + { IPv4(203,251,250,0),24 }, + { IPv4(203,251,253,0),24 }, + { IPv4(203,252,0,0),20 }, + { IPv4(203,252,16,0),22 }, + { IPv4(203,252,16,0),21 }, + { IPv4(203,252,20,0),23 }, + { IPv4(203,252,27,0),24 }, + { IPv4(203,252,28,0),22 }, + { IPv4(203,252,32,0),21 }, + { IPv4(203,252,40,0),24 }, + { IPv4(203,252,41,0),24 }, + { IPv4(203,252,42,0),24 }, + { IPv4(203,252,43,0),24 }, + { IPv4(203,252,44,0),22 }, + { IPv4(203,252,48,0),20 }, + { IPv4(203,252,64,0),19 }, + { IPv4(203,252,96,0),19 }, + { IPv4(203,252,128,0),19 }, + { IPv4(203,252,160,0),24 }, + { IPv4(203,252,161,0),24 }, + { IPv4(203,252,162,0),24 }, + { IPv4(203,252,163,0),24 }, + { IPv4(203,252,164,0),24 }, + { IPv4(203,252,165,0),24 }, + { IPv4(203,252,166,0),24 }, + { IPv4(203,252,168,0),21 }, + { IPv4(203,252,176,0),20 }, + { IPv4(203,252,192,0),20 }, + { IPv4(203,252,208,0),20 }, + { IPv4(203,252,224,0),19 }, + { IPv4(203,253,0,0),19 }, + { IPv4(203,253,32,0),20 }, + { IPv4(203,253,64,0),20 }, + { IPv4(203,253,96,0),20 }, + { IPv4(203,253,128,0),20 }, + { IPv4(203,253,144,0),22 }, + { IPv4(203,253,160,0),20 }, + { IPv4(203,253,176,0),21 }, + { IPv4(203,253,184,0),22 }, + { IPv4(203,253,188,0),23 }, + { IPv4(203,253,190,0),24 }, + { IPv4(203,253,192,0),19 }, + { IPv4(203,253,224,0),22 }, + { IPv4(203,253,232,0),24 }, + { IPv4(203,253,237,0),24 }, + { IPv4(203,253,240,0),21 }, + { IPv4(203,253,240,0),22 }, + { IPv4(203,253,248,0),21 }, + { IPv4(203,253,254,0),24 }, + { IPv4(203,253,255,0),24 }, + { IPv4(203,254,0,0),18 }, + { IPv4(203,254,8,0),23 }, + { IPv4(203,254,9,0),24 }, + { IPv4(203,254,10,0),24 }, + { IPv4(203,254,41,0),24 }, + { IPv4(203,254,42,0),24 }, + { IPv4(203,254,43,0),24 }, + { IPv4(203,254,44,0),24 }, + { IPv4(203,254,47,0),24 }, + { IPv4(203,254,64,0),19 }, + { IPv4(203,254,96,0),22 }, + { IPv4(203,254,120,0),24 }, + { IPv4(203,254,128,0),19 }, + { IPv4(203,254,160,0),21 }, + { 
IPv4(203,254,168,0),22 }, + { IPv4(203,254,172,0),24 }, + { IPv4(203,254,173,0),24 }, + { IPv4(203,254,176,0),20 }, + { IPv4(203,255,0,0),18 }, + { IPv4(203,255,64,0),19 }, + { IPv4(203,255,96,0),20 }, + { IPv4(203,255,120,0),22 }, + { IPv4(203,255,156,0),24 }, + { IPv4(203,255,157,0),24 }, + { IPv4(203,255,158,0),24 }, + { IPv4(203,255,159,0),24 }, + { IPv4(203,255,160,0),19 }, + { IPv4(203,255,192,0),20 }, + { IPv4(203,255,208,0),24 }, + { IPv4(203,255,209,0),24 }, + { IPv4(203,255,210,0),24 }, + { IPv4(203,255,211,0),24 }, + { IPv4(203,255,212,0),22 }, + { IPv4(203,255,216,0),23 }, + { IPv4(203,255,216,0),22 }, + { IPv4(203,255,218,0),23 }, + { IPv4(203,255,220,0),23 }, + { IPv4(203,255,222,0),23 }, + { IPv4(203,255,224,0),21 }, + { IPv4(203,255,232,0),24 }, + { IPv4(203,255,234,0),24 }, + { IPv4(203,255,236,0),22 }, + { IPv4(203,255,240,0),21 }, + { IPv4(203,255,248,0),21 }, + { IPv4(204,0,0,0),14 }, + { IPv4(204,0,43,0),24 }, + { IPv4(204,0,49,0),24 }, + { IPv4(204,4,60,0),24 }, + { IPv4(204,4,86,0),23 }, + { IPv4(204,4,88,0),24 }, + { IPv4(204,4,178,0),24 }, + { IPv4(204,4,179,0),24 }, + { IPv4(204,4,182,0),24 }, + { IPv4(204,4,183,0),24 }, + { IPv4(204,4,185,0),24 }, + { IPv4(204,4,187,0),24 }, + { IPv4(204,4,190,0),24 }, + { IPv4(204,4,191,0),24 }, + { IPv4(204,6,36,0),24 }, + { IPv4(204,6,91,0),24 }, + { IPv4(204,6,205,0),24 }, + { IPv4(204,6,206,0),24 }, + { IPv4(204,6,207,0),24 }, + { IPv4(204,6,208,0),24 }, + { IPv4(204,11,1,0),24 }, + { IPv4(204,17,16,0),24 }, + { IPv4(204,17,16,0),20 }, + { IPv4(204,17,17,0),24 }, + { IPv4(204,17,24,0),24 }, + { IPv4(204,17,26,0),24 }, + { IPv4(204,17,27,0),24 }, + { IPv4(204,17,64,0),18 }, + { IPv4(204,17,128,0),23 }, + { IPv4(204,17,132,0),24 }, + { IPv4(204,17,133,0),24 }, + { IPv4(204,17,139,0),24 }, + { IPv4(204,17,177,0),24 }, + { IPv4(204,17,179,0),24 }, + { IPv4(204,17,185,0),24 }, + { IPv4(204,17,189,0),24 }, + { IPv4(204,17,195,0),24 }, + { IPv4(204,17,201,0),24 }, + { IPv4(204,17,205,0),24 }, + { IPv4(204,17,209,0),24 }, + { IPv4(204,17,221,0),24 }, + { IPv4(204,17,228,0),24 }, + { IPv4(204,19,1,0),24 }, + { IPv4(204,19,16,0),24 }, + { IPv4(204,19,34,0),24 }, + { IPv4(204,19,35,0),24 }, + { IPv4(204,19,116,0),23 }, + { IPv4(204,19,136,0),24 }, + { IPv4(204,19,138,0),24 }, + { IPv4(204,19,164,0),24 }, + { IPv4(204,19,170,0),24 }, + { IPv4(204,19,170,0),23 }, + { IPv4(204,19,172,0),22 }, + { IPv4(204,19,184,0),23 }, + { IPv4(204,26,1,0),24 }, + { IPv4(204,26,5,0),24 }, + { IPv4(204,26,6,0),24 }, + { IPv4(204,26,16,0),20 }, + { IPv4(204,27,64,0),18 }, + { IPv4(204,27,114,0),24 }, + { IPv4(204,27,128,0),24 }, + { IPv4(204,27,133,0),24 }, + { IPv4(204,27,156,0),24 }, + { IPv4(204,27,162,0),24 }, + { IPv4(204,27,165,0),24 }, + { IPv4(204,27,176,0),24 }, + { IPv4(204,27,180,0),24 }, + { IPv4(204,27,188,0),24 }, + { IPv4(204,27,196,0),24 }, + { IPv4(204,27,239,0),24 }, + { IPv4(204,27,250,0),24 }, + { IPv4(204,27,251,0),24 }, + { IPv4(204,27,253,0),24 }, + { IPv4(204,28,3,0),24 }, + { IPv4(204,28,140,0),24 }, + { IPv4(204,28,150,0),24 }, + { IPv4(204,29,134,0),24 }, + { IPv4(204,29,135,0),24 }, + { IPv4(204,29,145,0),24 }, + { IPv4(204,29,154,0),24 }, + { IPv4(204,29,171,0),24 }, + { IPv4(204,29,185,0),24 }, + { IPv4(204,29,186,0),23 }, + { IPv4(204,29,192,0),24 }, + { IPv4(204,29,196,0),24 }, + { IPv4(204,29,197,0),24 }, + { IPv4(204,29,200,0),24 }, + { IPv4(204,29,206,0),24 }, + { IPv4(204,29,207,0),24 }, + { IPv4(204,29,217,0),24 }, + { IPv4(204,29,238,0),24 }, + { IPv4(204,29,239,0),24 }, + { IPv4(204,29,244,0),22 }, + { 
IPv4(204,29,245,0),24 }, + { IPv4(204,30,0,0),15 }, + { IPv4(204,30,91,0),24 }, + { IPv4(204,30,103,0),24 }, + { IPv4(204,30,120,0),24 }, + { IPv4(204,31,0,0),24 }, + { IPv4(204,31,169,0),24 }, + { IPv4(204,31,181,0),24 }, + { IPv4(204,31,191,0),24 }, + { IPv4(204,31,192,0),24 }, + { IPv4(204,31,193,0),24 }, + { IPv4(204,31,198,0),24 }, + { IPv4(204,31,213,0),24 }, + { IPv4(204,32,0,0),15 }, + { IPv4(204,32,8,0),24 }, + { IPv4(204,32,16,0),24 }, + { IPv4(204,32,18,0),24 }, + { IPv4(204,32,20,0),24 }, + { IPv4(204,32,38,0),24 }, + { IPv4(204,32,92,0),24 }, + { IPv4(204,32,94,0),24 }, + { IPv4(204,32,125,0),24 }, + { IPv4(204,33,56,0),24 }, + { IPv4(204,33,160,0),22 }, + { IPv4(204,33,164,0),23 }, + { IPv4(204,33,181,0),24 }, + { IPv4(204,33,192,0),23 }, + { IPv4(204,33,194,0),24 }, + { IPv4(204,33,211,0),24 }, + { IPv4(204,33,213,0),24 }, + { IPv4(204,33,214,0),24 }, + { IPv4(204,33,215,0),24 }, + { IPv4(204,33,216,0),24 }, + { IPv4(204,33,217,0),24 }, + { IPv4(204,33,218,0),24 }, + { IPv4(204,33,249,0),24 }, + { IPv4(204,33,250,0),24 }, + { IPv4(204,34,0,0),19 }, + { IPv4(204,34,2,0),24 }, + { IPv4(204,34,3,0),24 }, + { IPv4(204,34,4,0),24 }, + { IPv4(204,34,8,0),24 }, + { IPv4(204,34,9,0),24 }, + { IPv4(204,34,10,0),24 }, + { IPv4(204,34,11,0),24 }, + { IPv4(204,34,12,0),24 }, + { IPv4(204,34,13,0),24 }, + { IPv4(204,34,14,0),24 }, + { IPv4(204,34,15,0),24 }, + { IPv4(204,34,64,0),18 }, + { IPv4(204,34,108,0),24 }, + { IPv4(204,34,109,0),24 }, + { IPv4(204,34,128,0),17 }, + { IPv4(204,34,136,0),24 }, + { IPv4(204,34,141,0),24 }, + { IPv4(204,34,153,0),24 }, + { IPv4(204,34,154,0),24 }, + { IPv4(204,34,170,0),24 }, + { IPv4(204,34,177,0),24 }, + { IPv4(204,34,197,0),24 }, + { IPv4(204,34,201,0),24 }, + { IPv4(204,34,204,0),24 }, + { IPv4(204,34,205,0),24 }, + { IPv4(204,34,226,0),24 }, + { IPv4(204,34,229,0),24 }, + { IPv4(204,34,236,0),24 }, + { IPv4(204,34,239,0),24 }, + { IPv4(204,34,244,0),24 }, + { IPv4(204,34,251,0),24 }, + { IPv4(204,34,254,0),24 }, + { IPv4(204,36,0,0),20 }, + { IPv4(204,36,15,0),24 }, + { IPv4(204,36,16,0),20 }, + { IPv4(204,36,32,0),24 }, + { IPv4(204,36,38,0),24 }, + { IPv4(204,36,47,0),24 }, + { IPv4(204,37,8,0),21 }, + { IPv4(204,37,16,0),24 }, + { IPv4(204,37,16,0),21 }, + { IPv4(204,37,17,0),24 }, + { IPv4(204,37,128,0),24 }, + { IPv4(204,37,128,0),17 }, + { IPv4(204,37,129,0),24 }, + { IPv4(204,37,130,0),24 }, + { IPv4(204,37,131,0),24 }, + { IPv4(204,37,132,0),24 }, + { IPv4(204,37,133,0),24 }, + { IPv4(204,37,134,0),24 }, + { IPv4(204,37,136,0),24 }, + { IPv4(204,37,154,0),24 }, + { IPv4(204,37,170,0),24 }, + { IPv4(204,37,182,0),24 }, + { IPv4(204,37,201,0),24 }, + { IPv4(204,42,0,0),16 }, + { IPv4(204,42,48,0),20 }, + { IPv4(204,43,64,0),18 }, + { IPv4(204,44,128,0),21 }, + { IPv4(204,44,136,0),23 }, + { IPv4(204,44,208,0),20 }, + { IPv4(204,44,240,0),24 }, + { IPv4(204,44,241,0),24 }, + { IPv4(204,44,244,0),24 }, + { IPv4(204,44,245,0),24 }, + { IPv4(204,44,246,0),24 }, + { IPv4(204,44,248,0),24 }, + { IPv4(204,44,249,0),24 }, + { IPv4(204,44,250,0),24 }, + { IPv4(204,44,251,0),24 }, + { IPv4(204,44,252,0),24 }, + { IPv4(204,44,253,0),24 }, + { IPv4(204,44,254,0),24 }, + { IPv4(204,44,255,0),24 }, + { IPv4(204,48,8,0),24 }, + { IPv4(204,48,32,0),19 }, + { IPv4(204,48,128,0),17 }, + { IPv4(204,50,76,0),24 }, + { IPv4(204,50,235,0),24 }, + { IPv4(204,50,236,0),24 }, + { IPv4(204,52,135,0),24 }, + { IPv4(204,52,175,0),24 }, + { IPv4(204,52,176,0),24 }, + { IPv4(204,52,177,0),24 }, + { IPv4(204,52,178,0),24 }, + { IPv4(204,52,187,0),24 }, + { 
IPv4(204,52,188,0),24 }, + { IPv4(204,52,191,0),24 }, + { IPv4(204,52,215,0),24 }, + { IPv4(204,52,223,0),24 }, + { IPv4(204,52,238,0),24 }, + { IPv4(204,52,242,0),24 }, + { IPv4(204,52,244,0),24 }, + { IPv4(204,52,245,0),24 }, + { IPv4(204,52,246,0),23 }, + { IPv4(204,53,0,0),16 }, + { IPv4(204,54,0,0),16 }, + { IPv4(204,56,0,0),21 }, + { IPv4(204,56,64,0),19 }, + { IPv4(204,56,64,0),18 }, + { IPv4(204,56,96,0),22 }, + { IPv4(204,56,100,0),23 }, + { IPv4(204,56,102,0),24 }, + { IPv4(204,56,104,0),24 }, + { IPv4(204,56,105,0),24 }, + { IPv4(204,56,106,0),24 }, + { IPv4(204,56,107,0),24 }, + { IPv4(204,56,108,0),24 }, + { IPv4(204,56,109,0),24 }, + { IPv4(204,56,110,0),24 }, + { IPv4(204,56,111,0),24 }, + { IPv4(204,56,112,0),21 }, + { IPv4(204,56,120,0),21 }, + { IPv4(204,57,32,0),19 }, + { IPv4(204,57,67,0),24 }, + { IPv4(204,57,142,0),24 }, + { IPv4(204,58,30,0),24 }, + { IPv4(204,58,149,0),24 }, + { IPv4(204,58,152,0),22 }, + { IPv4(204,58,224,0),24 }, + { IPv4(204,58,225,0),24 }, + { IPv4(204,58,226,0),24 }, + { IPv4(204,58,227,0),24 }, + { IPv4(204,58,232,0),22 }, + { IPv4(204,58,248,0),24 }, + { IPv4(204,60,0,0),16 }, + { IPv4(204,60,246,0),24 }, + { IPv4(204,62,192,0),24 }, + { IPv4(204,62,200,0),24 }, + { IPv4(204,62,232,0),24 }, + { IPv4(204,62,247,0),24 }, + { IPv4(204,62,248,0),23 }, + { IPv4(204,62,254,0),24 }, + { IPv4(204,63,208,0),24 }, + { IPv4(204,63,209,0),24 }, + { IPv4(204,63,210,0),24 }, + { IPv4(204,63,211,0),24 }, + { IPv4(204,63,212,0),24 }, + { IPv4(204,64,0,0),14 }, + { IPv4(204,68,16,0),20 }, + { IPv4(204,68,25,0),24 }, + { IPv4(204,68,32,0),19 }, + { IPv4(204,68,133,0),24 }, + { IPv4(204,68,140,0),24 }, + { IPv4(204,68,149,0),24 }, + { IPv4(204,68,151,0),24 }, + { IPv4(204,68,152,0),24 }, + { IPv4(204,68,153,0),24 }, + { IPv4(204,68,154,0),24 }, + { IPv4(204,68,168,0),24 }, + { IPv4(204,68,173,0),24 }, + { IPv4(204,68,178,0),24 }, + { IPv4(204,68,186,0),24 }, + { IPv4(204,68,187,0),24 }, + { IPv4(204,68,217,0),24 }, + { IPv4(204,68,227,0),24 }, + { IPv4(204,68,228,0),24 }, + { IPv4(204,68,229,0),24 }, + { IPv4(204,68,230,0),24 }, + { IPv4(204,68,247,0),24 }, + { IPv4(204,69,32,0),24 }, + { IPv4(204,69,128,0),24 }, + { IPv4(204,69,130,0),24 }, + { IPv4(204,69,131,0),24 }, + { IPv4(204,69,132,0),24 }, + { IPv4(204,69,133,0),24 }, + { IPv4(204,69,158,0),23 }, + { IPv4(204,69,160,0),23 }, + { IPv4(204,69,162,0),24 }, + { IPv4(204,69,169,0),24 }, + { IPv4(204,69,177,0),24 }, + { IPv4(204,69,178,0),24 }, + { IPv4(204,69,198,0),23 }, + { IPv4(204,69,200,0),24 }, + { IPv4(204,69,207,0),24 }, + { IPv4(204,69,220,0),24 }, + { IPv4(204,69,222,0),24 }, + { IPv4(204,69,224,0),22 }, + { IPv4(204,69,225,0),24 }, + { IPv4(204,69,226,0),23 }, + { IPv4(204,69,228,0),24 }, + { IPv4(204,69,229,0),24 }, + { IPv4(204,69,230,0),24 }, + { IPv4(204,69,233,0),24 }, + { IPv4(204,71,12,0),23 }, + { IPv4(204,71,21,0),24 }, + { IPv4(204,71,31,0),24 }, + { IPv4(204,71,65,0),24 }, + { IPv4(204,71,102,0),24 }, + { IPv4(204,71,127,0),24 }, + { IPv4(204,71,154,0),24 }, + { IPv4(204,71,212,0),24 }, + { IPv4(204,71,213,0),24 }, + { IPv4(204,72,0,0),17 }, + { IPv4(204,72,0,0),15 }, + { IPv4(204,72,192,0),19 }, + { IPv4(204,72,224,0),23 }, + { IPv4(204,72,226,0),23 }, + { IPv4(204,72,228,0),22 }, + { IPv4(204,72,232,0),22 }, + { IPv4(204,72,246,0),23 }, + { IPv4(204,72,248,0),21 }, + { IPv4(204,73,0,0),19 }, + { IPv4(204,73,36,0),22 }, + { IPv4(204,73,43,0),24 }, + { IPv4(204,73,79,0),24 }, + { IPv4(204,73,80,0),22 }, + { IPv4(204,73,104,0),22 }, + { IPv4(204,73,160,0),21 }, + { IPv4(204,73,192,0),21 
}, + { IPv4(204,74,0,0),21 }, + { IPv4(204,74,8,0),23 }, + { IPv4(204,74,100,0),24 }, + { IPv4(204,74,101,0),24 }, + { IPv4(204,74,107,0),24 }, + { IPv4(204,74,108,0),24 }, + { IPv4(204,75,146,0),24 }, + { IPv4(204,75,153,0),24 }, + { IPv4(204,75,154,0),24 }, + { IPv4(204,75,156,0),24 }, + { IPv4(204,75,161,0),24 }, + { IPv4(204,75,162,0),24 }, + { IPv4(204,75,195,0),24 }, + { IPv4(204,75,207,0),24 }, + { IPv4(204,75,209,0),24 }, + { IPv4(204,75,228,0),24 }, + { IPv4(204,75,238,0),24 }, + { IPv4(204,75,249,0),24 }, + { IPv4(204,75,250,0),23 }, + { IPv4(204,75,252,0),22 }, + { IPv4(204,76,0,0),21 }, + { IPv4(204,76,113,0),24 }, + { IPv4(204,76,152,0),22 }, + { IPv4(204,76,156,0),24 }, + { IPv4(204,76,174,0),23 }, + { IPv4(204,76,176,0),22 }, + { IPv4(204,76,180,0),23 }, + { IPv4(204,76,182,0),24 }, + { IPv4(204,76,190,0),23 }, + { IPv4(204,76,192,0),22 }, + { IPv4(204,77,32,0),19 }, + { IPv4(204,77,78,0),24 }, + { IPv4(204,77,134,0),24 }, + { IPv4(204,77,141,0),24 }, + { IPv4(204,77,142,0),24 }, + { IPv4(204,77,145,0),24 }, + { IPv4(204,77,146,0),24 }, + { IPv4(204,77,148,0),23 }, + { IPv4(204,77,156,0),24 }, + { IPv4(204,77,159,0),24 }, + { IPv4(204,77,164,0),24 }, + { IPv4(204,77,166,0),24 }, + { IPv4(204,77,167,0),24 }, + { IPv4(204,77,181,0),24 }, + { IPv4(204,78,32,0),19 }, + { IPv4(204,79,190,0),24 }, + { IPv4(204,80,132,0),24 }, + { IPv4(204,80,136,0),24 }, + { IPv4(204,80,150,0),24 }, + { IPv4(204,80,212,0),24 }, + { IPv4(204,80,213,0),24 }, + { IPv4(204,80,221,0),24 }, + { IPv4(204,80,222,0),24 }, + { IPv4(204,86,120,0),21 }, + { IPv4(204,86,128,0),23 }, + { IPv4(204,86,144,0),22 }, + { IPv4(204,86,144,0),21 }, + { IPv4(204,87,133,0),24 }, + { IPv4(204,87,151,0),24 }, + { IPv4(204,87,158,0),24 }, + { IPv4(204,87,163,0),24 }, + { IPv4(204,87,178,0),24 }, + { IPv4(204,87,183,0),24 }, + { IPv4(204,87,185,0),24 }, + { IPv4(204,87,187,0),24 }, + { IPv4(204,87,220,0),24 }, + { IPv4(204,87,230,0),24 }, + { IPv4(204,88,64,0),19 }, + { IPv4(204,88,128,0),19 }, + { IPv4(204,88,224,0),19 }, + { IPv4(204,89,8,0),21 }, + { IPv4(204,89,49,0),24 }, + { IPv4(204,89,56,0),24 }, + { IPv4(204,89,129,0),24 }, + { IPv4(204,89,132,0),23 }, + { IPv4(204,89,138,0),24 }, + { IPv4(204,89,139,0),24 }, + { IPv4(204,89,140,0),24 }, + { IPv4(204,89,144,0),24 }, + { IPv4(204,89,155,0),24 }, + { IPv4(204,89,163,0),24 }, + { IPv4(204,89,164,0),22 }, + { IPv4(204,89,172,0),24 }, + { IPv4(204,89,181,0),24 }, + { IPv4(204,89,187,0),24 }, + { IPv4(204,89,188,0),24 }, + { IPv4(204,89,197,0),24 }, + { IPv4(204,89,200,0),24 }, + { IPv4(204,89,216,0),24 }, + { IPv4(204,89,219,0),24 }, + { IPv4(204,89,226,0),24 }, + { IPv4(204,89,231,0),24 }, + { IPv4(204,89,244,0),24 }, + { IPv4(204,89,251,0),24 }, + { IPv4(204,89,254,0),24 }, + { IPv4(204,90,69,0),24 }, + { IPv4(204,90,78,0),24 }, + { IPv4(204,90,119,0),24 }, + { IPv4(204,90,120,0),23 }, + { IPv4(204,90,122,0),24 }, + { IPv4(204,90,181,0),24 }, + { IPv4(204,90,182,0),24 }, + { IPv4(204,91,11,0),24 }, + { IPv4(204,91,12,0),24 }, + { IPv4(204,91,139,0),24 }, + { IPv4(204,91,156,0),24 }, + { IPv4(204,92,43,0),24 }, + { IPv4(204,92,73,0),24 }, + { IPv4(204,92,91,0),24 }, + { IPv4(204,92,234,0),23 }, + { IPv4(204,92,254,0),24 }, + { IPv4(204,94,39,0),24 }, + { IPv4(204,94,40,0),21 }, + { IPv4(204,94,64,0),19 }, + { IPv4(204,94,112,0),21 }, + { IPv4(204,94,115,0),24 }, + { IPv4(204,94,118,0),24 }, + { IPv4(204,94,119,0),24 }, + { IPv4(204,94,129,0),24 }, + { IPv4(204,94,144,0),20 }, + { IPv4(204,94,248,0),23 }, + { IPv4(204,94,249,0),24 }, + { IPv4(204,95,160,0),19 }, + { 
IPv4(204,95,192,0),19 }, + { IPv4(204,96,112,0),24 }, + { IPv4(204,96,224,0),19 }, + { IPv4(204,97,2,0),24 }, + { IPv4(204,97,3,0),24 }, + { IPv4(204,97,32,0),19 }, + { IPv4(204,97,64,0),21 }, + { IPv4(204,97,89,0),24 }, + { IPv4(204,97,104,0),24 }, + { IPv4(204,98,0,0),16 }, + { IPv4(204,99,0,0),17 }, + { IPv4(204,99,128,0),18 }, + { IPv4(204,99,158,0),24 }, + { IPv4(204,99,160,0),24 }, + { IPv4(204,99,179,0),24 }, + { IPv4(204,99,192,0),19 }, + { IPv4(204,99,224,0),19 }, + { IPv4(204,101,30,0),24 }, + { IPv4(204,101,31,0),24 }, + { IPv4(204,101,33,0),24 }, + { IPv4(204,101,34,0),24 }, + { IPv4(204,101,106,0),24 }, + { IPv4(204,101,111,0),24 }, + { IPv4(204,101,113,0),24 }, + { IPv4(204,101,115,0),24 }, + { IPv4(204,101,118,0),24 }, + { IPv4(204,102,0,0),16 }, + { IPv4(204,102,10,0),23 }, + { IPv4(204,102,12,0),22 }, + { IPv4(204,102,16,0),20 }, + { IPv4(204,102,32,0),20 }, + { IPv4(204,102,48,0),21 }, + { IPv4(204,102,56,0),22 }, + { IPv4(204,102,60,0),23 }, + { IPv4(204,102,62,0),24 }, + { IPv4(204,102,115,0),24 }, + { IPv4(204,102,116,0),22 }, + { IPv4(204,102,120,0),21 }, + { IPv4(204,102,128,0),23 }, + { IPv4(204,102,234,0),23 }, + { IPv4(204,103,157,0),24 }, + { IPv4(204,103,158,0),24 }, + { IPv4(204,103,226,0),24 }, + { IPv4(204,104,55,0),24 }, + { IPv4(204,104,132,0),24 }, + { IPv4(204,104,133,0),24 }, + { IPv4(204,104,134,0),24 }, + { IPv4(204,104,135,0),24 }, + { IPv4(204,104,140,0),24 }, + { IPv4(204,106,32,0),19 }, + { IPv4(204,106,62,0),23 }, + { IPv4(204,107,60,0),22 }, + { IPv4(204,107,76,0),24 }, + { IPv4(204,107,77,0),24 }, + { IPv4(204,107,85,0),24 }, + { IPv4(204,107,91,0),24 }, + { IPv4(204,107,104,0),24 }, + { IPv4(204,107,105,0),24 }, + { IPv4(204,107,107,0),24 }, + { IPv4(204,107,109,0),24 }, + { IPv4(204,107,120,0),24 }, + { IPv4(204,107,129,0),24 }, + { IPv4(204,107,130,0),24 }, + { IPv4(204,107,133,0),24 }, + { IPv4(204,107,143,0),24 }, + { IPv4(204,107,154,0),24 }, + { IPv4(204,107,168,0),24 }, + { IPv4(204,107,178,0),24 }, + { IPv4(204,107,183,0),24 }, + { IPv4(204,107,200,0),24 }, + { IPv4(204,107,211,0),24 }, + { IPv4(204,107,232,0),24 }, + { IPv4(204,107,238,0),24 }, + { IPv4(204,107,242,0),24 }, + { IPv4(204,107,249,0),24 }, + { IPv4(204,107,252,0),24 }, + { IPv4(204,107,254,0),24 }, + { IPv4(204,108,0,0),21 }, + { IPv4(204,108,8,0),24 }, + { IPv4(204,108,9,0),24 }, + { IPv4(204,108,10,0),24 }, + { IPv4(204,108,16,0),24 }, + { IPv4(204,110,0,0),21 }, + { IPv4(204,110,135,0),24 }, + { IPv4(204,110,138,0),24 }, + { IPv4(204,110,164,0),24 }, + { IPv4(204,110,167,0),24 }, + { IPv4(204,110,169,0),24 }, + { IPv4(204,110,226,0),24 }, + { IPv4(204,112,48,0),22 }, + { IPv4(204,112,90,0),24 }, + { IPv4(204,112,91,0),24 }, + { IPv4(204,112,103,0),24 }, + { IPv4(204,112,108,0),24 }, + { IPv4(204,112,109,0),24 }, + { IPv4(204,112,122,0),24 }, + { IPv4(204,112,126,0),24 }, + { IPv4(204,112,130,0),23 }, + { IPv4(204,112,132,0),22 }, + { IPv4(204,112,158,0),24 }, + { IPv4(204,112,189,0),24 }, + { IPv4(204,112,235,0),24 }, + { IPv4(204,112,237,0),24 }, + { IPv4(204,113,0,0),16 }, + { IPv4(204,113,91,0),24 }, + { IPv4(204,113,123,0),24 }, + { IPv4(204,114,64,0),18 }, + { IPv4(204,114,253,0),24 }, + { IPv4(204,115,76,0),24 }, + { IPv4(204,115,88,0),24 }, + { IPv4(204,115,89,0),24 }, + { IPv4(204,115,90,0),24 }, + { IPv4(204,115,91,0),24 }, + { IPv4(204,115,92,0),24 }, + { IPv4(204,115,93,0),24 }, + { IPv4(204,115,94,0),24 }, + { IPv4(204,115,95,0),24 }, + { IPv4(204,115,96,0),24 }, + { IPv4(204,115,97,0),24 }, + { IPv4(204,115,98,0),24 }, + { IPv4(204,115,99,0),24 }, + { 
IPv4(204,115,121,0),24 }, + { IPv4(204,115,164,0),24 }, + { IPv4(204,115,215,0),24 }, + { IPv4(204,115,216,0),21 }, + { IPv4(204,115,224,0),24 }, + { IPv4(204,115,225,0),24 }, + { IPv4(204,115,226,0),24 }, + { IPv4(204,115,227,0),24 }, + { IPv4(204,115,228,0),24 }, + { IPv4(204,115,229,0),24 }, + { IPv4(204,115,230,0),24 }, + { IPv4(204,116,0,0),16 }, + { IPv4(204,117,91,0),24 }, + { IPv4(204,117,224,0),23 }, + { IPv4(204,117,240,0),20 }, + { IPv4(204,118,32,0),24 }, + { IPv4(204,118,52,0),22 }, + { IPv4(204,118,120,0),23 }, + { IPv4(204,118,174,0),24 }, + { IPv4(204,119,0,0),24 }, + { IPv4(204,119,1,0),24 }, + { IPv4(204,119,56,0),22 }, + { IPv4(204,119,64,0),18 }, + { IPv4(204,119,248,0),21 }, + { IPv4(204,119,249,0),24 }, + { IPv4(204,119,255,0),24 }, + { IPv4(204,120,8,0),21 }, + { IPv4(204,120,80,0),20 }, + { IPv4(204,120,138,0),24 }, + { IPv4(204,120,144,0),20 }, + { IPv4(204,121,0,0),16 }, + { IPv4(204,122,0,0),21 }, + { IPv4(204,123,0,0),16 }, + { IPv4(204,124,82,0),24 }, + { IPv4(204,124,85,0),24 }, + { IPv4(204,124,86,0),24 }, + { IPv4(204,124,92,0),24 }, + { IPv4(204,124,93,0),24 }, + { IPv4(204,124,104,0),24 }, + { IPv4(204,124,105,0),24 }, + { IPv4(204,124,106,0),24 }, + { IPv4(204,124,107,0),24 }, + { IPv4(204,124,116,0),24 }, + { IPv4(204,124,120,0),24 }, + { IPv4(204,124,121,0),24 }, + { IPv4(204,124,122,0),24 }, + { IPv4(204,124,123,0),24 }, + { IPv4(204,124,132,0),24 }, + { IPv4(204,124,133,0),24 }, + { IPv4(204,124,134,0),24 }, + { IPv4(204,124,135,0),24 }, + { IPv4(204,124,137,0),24 }, + { IPv4(204,124,160,0),22 }, + { IPv4(204,124,164,0),23 }, + { IPv4(204,124,166,0),24 }, + { IPv4(204,124,197,0),24 }, + { IPv4(204,124,208,0),23 }, + { IPv4(204,124,244,0),24 }, + { IPv4(204,124,245,0),24 }, + { IPv4(204,124,246,0),24 }, + { IPv4(204,124,247,0),24 }, + { IPv4(204,125,142,0),24 }, + { IPv4(204,126,22,0),24 }, + { IPv4(204,126,122,0),24 }, + { IPv4(204,126,123,0),24 }, + { IPv4(204,126,134,0),24 }, + { IPv4(204,126,135,0),24 }, + { IPv4(204,126,172,0),24 }, + { IPv4(204,126,173,0),24 }, + { IPv4(204,126,198,0),24 }, + { IPv4(204,126,199,0),24 }, + { IPv4(204,126,242,0),24 }, + { IPv4(204,126,243,0),24 }, + { IPv4(204,126,250,0),23 }, + { IPv4(204,126,250,0),24 }, + { IPv4(204,126,251,0),24 }, + { IPv4(204,126,254,0),23 }, + { IPv4(204,127,64,0),20 }, + { IPv4(204,127,128,0),17 }, + { IPv4(204,127,192,0),20 }, + { IPv4(204,128,32,0),20 }, + { IPv4(204,128,48,0),22 }, + { IPv4(204,128,147,0),24 }, + { IPv4(204,128,156,0),24 }, + { IPv4(204,128,158,0),24 }, + { IPv4(204,128,167,0),24 }, + { IPv4(204,128,175,0),24 }, + { IPv4(204,128,179,0),24 }, + { IPv4(204,128,180,0),24 }, + { IPv4(204,128,192,0),24 }, + { IPv4(204,128,199,0),24 }, + { IPv4(204,128,213,0),24 }, + { IPv4(204,128,215,0),24 }, + { IPv4(204,128,226,0),24 }, + { IPv4(204,128,227,0),24 }, + { IPv4(204,128,232,0),22 }, + { IPv4(204,128,232,0),24 }, + { IPv4(204,128,236,0),22 }, + { IPv4(204,130,138,0),24 }, + { IPv4(204,130,166,0),24 }, + { IPv4(204,130,176,0),24 }, + { IPv4(204,130,184,0),24 }, + { IPv4(204,130,185,0),24 }, + { IPv4(204,130,191,0),24 }, + { IPv4(204,130,198,0),24 }, + { IPv4(204,130,216,0),24 }, + { IPv4(204,130,226,0),23 }, + { IPv4(204,130,228,0),22 }, + { IPv4(204,130,232,0),22 }, + { IPv4(204,130,236,0),23 }, + { IPv4(204,130,244,0),24 }, + { IPv4(204,130,248,0),24 }, + { IPv4(204,131,0,0),16 }, + { IPv4(204,131,62,0),24 }, + { IPv4(204,131,105,0),24 }, + { IPv4(204,131,176,0),23 }, + { IPv4(204,131,188,0),24 }, + { IPv4(204,132,0,0),15 }, + { IPv4(204,132,148,0),24 }, + { 
IPv4(204,132,224,0),20 }, + { IPv4(204,133,127,0),24 }, + { IPv4(204,134,131,0),24 }, + { IPv4(204,134,132,0),24 }, + { IPv4(204,134,133,0),24 }, + { IPv4(204,134,135,0),24 }, + { IPv4(204,134,136,0),24 }, + { IPv4(204,134,137,0),24 }, + { IPv4(204,134,142,0),24 }, + { IPv4(204,134,144,0),24 }, + { IPv4(204,134,147,0),24 }, + { IPv4(204,134,150,0),24 }, + { IPv4(204,134,194,0),23 }, + { IPv4(204,134,210,0),24 }, + { IPv4(204,134,217,0),24 }, + { IPv4(204,134,219,0),24 }, + { IPv4(204,134,220,0),24 }, + { IPv4(204,134,240,0),23 }, + { IPv4(204,134,251,0),24 }, + { IPv4(204,134,252,0),22 }, + { IPv4(204,136,23,0),24 }, + { IPv4(204,136,24,0),23 }, + { IPv4(204,136,26,0),23 }, + { IPv4(204,136,28,0),23 }, + { IPv4(204,137,183,0),24 }, + { IPv4(204,137,199,0),24 }, + { IPv4(204,138,27,0),24 }, + { IPv4(204,138,44,0),22 }, + { IPv4(204,138,48,0),22 }, + { IPv4(204,138,52,0),22 }, + { IPv4(204,138,56,0),22 }, + { IPv4(204,138,68,0),24 }, + { IPv4(204,138,71,0),24 }, + { IPv4(204,138,91,0),24 }, + { IPv4(204,138,103,0),24 }, + { IPv4(204,138,108,0),24 }, + { IPv4(204,138,111,0),24 }, + { IPv4(204,138,115,0),24 }, + { IPv4(204,138,128,0),21 }, + { IPv4(204,138,135,0),24 }, + { IPv4(204,138,172,0),24 }, + { IPv4(204,138,236,0),24 }, + { IPv4(204,138,237,0),24 }, + { IPv4(204,138,239,0),24 }, + { IPv4(204,139,45,0),24 }, + { IPv4(204,139,64,0),18 }, + { IPv4(204,140,15,0),24 }, + { IPv4(204,140,32,0),19 }, + { IPv4(204,140,71,0),24 }, + { IPv4(204,140,172,0),23 }, + { IPv4(204,140,245,0),24 }, + { IPv4(204,141,0,0),16 }, + { IPv4(204,141,44,0),22 }, + { IPv4(204,141,64,0),20 }, + { IPv4(204,141,101,0),24 }, + { IPv4(204,141,207,0),24 }, + { IPv4(204,141,235,0),24 }, + { IPv4(204,142,0,0),15 }, + { IPv4(204,142,178,0),24 }, + { IPv4(204,143,35,0),24 }, + { IPv4(204,143,36,0),24 }, + { IPv4(204,143,156,0),24 }, + { IPv4(204,143,170,0),24 }, + { IPv4(204,144,32,0),20 }, + { IPv4(204,144,48,0),21 }, + { IPv4(204,144,56,0),24 }, + { IPv4(204,144,76,0),24 }, + { IPv4(204,144,106,0),24 }, + { IPv4(204,144,128,0),17 }, + { IPv4(204,144,128,0),24 }, + { IPv4(204,144,129,0),24 }, + { IPv4(204,144,130,0),24 }, + { IPv4(204,144,131,0),24 }, + { IPv4(204,144,132,0),24 }, + { IPv4(204,144,133,0),24 }, + { IPv4(204,144,140,0),24 }, + { IPv4(204,144,141,0),24 }, + { IPv4(204,144,168,0),24 }, + { IPv4(204,144,174,0),24 }, + { IPv4(204,144,179,0),24 }, + { IPv4(204,144,182,0),24 }, + { IPv4(204,144,184,0),24 }, + { IPv4(204,144,244,0),24 }, + { IPv4(204,145,119,0),24 }, + { IPv4(204,145,144,0),24 }, + { IPv4(204,145,147,0),24 }, + { IPv4(204,145,148,0),24 }, + { IPv4(204,145,160,0),24 }, + { IPv4(204,145,167,0),24 }, + { IPv4(204,145,171,0),24 }, + { IPv4(204,145,186,0),24 }, + { IPv4(204,145,211,0),24 }, + { IPv4(204,145,215,0),24 }, + { IPv4(204,145,225,0),24 }, + { IPv4(204,145,230,0),24 }, + { IPv4(204,145,255,0),24 }, + { IPv4(204,146,0,0),16 }, + { IPv4(204,146,19,0),24 }, + { IPv4(204,146,20,0),24 }, + { IPv4(204,146,21,0),24 }, + { IPv4(204,146,22,0),24 }, + { IPv4(204,146,23,0),24 }, + { IPv4(204,146,24,0),22 }, + { IPv4(204,146,50,0),24 }, + { IPv4(204,146,60,0),24 }, + { IPv4(204,146,86,0),24 }, + { IPv4(204,146,133,0),24 }, + { IPv4(204,146,134,0),24 }, + { IPv4(204,146,140,0),24 }, + { IPv4(204,146,150,0),24 }, + { IPv4(204,146,157,0),24 }, + { IPv4(204,146,159,0),24 }, + { IPv4(204,146,165,0),24 }, + { IPv4(204,146,167,0),24 }, + { IPv4(204,146,173,0),24 }, + { IPv4(204,146,179,0),24 }, + { IPv4(204,146,189,0),24 }, + { IPv4(204,146,209,0),24 }, + { IPv4(204,146,230,0),24 }, + { IPv4(204,146,231,0),24 
}, + { IPv4(204,146,237,0),24 }, + { IPv4(204,147,16,0),20 }, + { IPv4(204,147,55,0),24 }, + { IPv4(204,148,71,0),24 }, + { IPv4(204,148,72,0),24 }, + { IPv4(204,148,80,0),21 }, + { IPv4(204,148,96,0),21 }, + { IPv4(204,148,108,0),24 }, + { IPv4(204,148,144,0),20 }, + { IPv4(204,148,160,0),20 }, + { IPv4(204,149,96,0),20 }, + { IPv4(204,149,112,0),21 }, + { IPv4(204,149,167,0),24 }, + { IPv4(204,151,57,0),24 }, + { IPv4(204,152,12,0),23 }, + { IPv4(204,152,24,0),24 }, + { IPv4(204,152,25,0),24 }, + { IPv4(204,152,42,0),24 }, + { IPv4(204,152,42,0),23 }, + { IPv4(204,152,43,0),24 }, + { IPv4(204,152,46,0),23 }, + { IPv4(204,152,48,0),24 }, + { IPv4(204,152,49,0),24 }, + { IPv4(204,152,56,0),23 }, + { IPv4(204,152,60,0),24 }, + { IPv4(204,152,70,0),23 }, + { IPv4(204,152,80,0),23 }, + { IPv4(204,152,98,0),24 }, + { IPv4(204,152,109,0),24 }, + { IPv4(204,152,114,0),23 }, + { IPv4(204,152,134,0),23 }, + { IPv4(204,152,142,0),24 }, + { IPv4(204,152,143,0),24 }, + { IPv4(204,152,156,0),22 }, + { IPv4(204,152,157,0),24 }, + { IPv4(204,152,159,0),24 }, + { IPv4(204,152,178,0),24 }, + { IPv4(204,152,184,0),21 }, + { IPv4(204,152,186,0),24 }, + { IPv4(204,152,187,0),24 }, + { IPv4(204,153,8,0),22 }, + { IPv4(204,153,49,0),24 }, + { IPv4(204,153,51,0),24 }, + { IPv4(204,153,60,0),24 }, + { IPv4(204,153,61,0),24 }, + { IPv4(204,153,62,0),24 }, + { IPv4(204,153,63,0),24 }, + { IPv4(204,153,68,0),24 }, + { IPv4(204,153,71,0),24 }, + { IPv4(204,153,96,0),22 }, + { IPv4(204,153,134,0),24 }, + { IPv4(204,153,155,0),24 }, + { IPv4(204,153,175,0),24 }, + { IPv4(204,153,198,0),24 }, + { IPv4(204,153,244,0),22 }, + { IPv4(204,154,32,0),21 }, + { IPv4(204,154,192,0),21 }, + { IPv4(204,154,228,0),24 }, + { IPv4(204,155,0,0),20 }, + { IPv4(204,155,16,0),21 }, + { IPv4(204,155,16,0),24 }, + { IPv4(204,155,24,0),23 }, + { IPv4(204,155,56,0),24 }, + { IPv4(204,155,96,0),20 }, + { IPv4(204,155,122,0),24 }, + { IPv4(204,155,141,0),24 }, + { IPv4(204,155,160,0),20 }, + { IPv4(204,155,226,0),24 }, + { IPv4(204,156,0,0),19 }, + { IPv4(204,156,78,0),24 }, + { IPv4(204,156,84,0),24 }, + { IPv4(204,156,96,0),20 }, + { IPv4(204,156,112,0),21 }, + { IPv4(204,156,120,0),24 }, + { IPv4(204,156,128,0),19 }, + { IPv4(204,157,11,0),24 }, + { IPv4(204,157,211,0),24 }, + { IPv4(204,157,238,0),24 }, + { IPv4(204,159,36,0),22 }, + { IPv4(204,159,107,0),24 }, + { IPv4(204,159,108,0),22 }, + { IPv4(204,162,80,0),21 }, + { IPv4(204,163,170,0),24 }, + { IPv4(204,164,98,0),23 }, + { IPv4(204,164,100,0),23 }, + { IPv4(204,165,17,0),24 }, + { IPv4(204,165,18,0),24 }, + { IPv4(204,168,0,0),16 }, + { IPv4(204,168,12,0),24 }, + { IPv4(204,168,16,0),24 }, + { IPv4(204,168,17,0),24 }, + { IPv4(204,168,18,0),24 }, + { IPv4(204,168,19,0),24 }, + { IPv4(204,168,20,0),24 }, + { IPv4(204,168,22,0),24 }, + { IPv4(204,168,23,0),24 }, + { IPv4(204,168,24,0),24 }, + { IPv4(204,168,25,0),24 }, + { IPv4(204,168,26,0),24 }, + { IPv4(204,168,27,0),24 }, + { IPv4(204,168,28,0),24 }, + { IPv4(204,168,29,0),24 }, + { IPv4(204,168,30,0),24 }, + { IPv4(204,168,31,0),24 }, + { IPv4(204,168,51,0),24 }, + { IPv4(204,168,52,0),22 }, + { IPv4(204,168,59,0),24 }, + { IPv4(204,168,62,0),24 }, + { IPv4(204,168,65,0),24 }, + { IPv4(204,168,66,0),24 }, + { IPv4(204,168,67,0),24 }, + { IPv4(204,168,68,0),24 }, + { IPv4(204,168,69,0),24 }, + { IPv4(204,168,70,0),24 }, + { IPv4(204,168,71,0),24 }, + { IPv4(204,168,72,0),24 }, + { IPv4(204,168,75,0),24 }, + { IPv4(204,168,86,0),24 }, + { IPv4(204,168,90,0),24 }, + { IPv4(204,168,91,0),24 }, + { IPv4(204,168,94,0),24 }, + { 
IPv4(204,168,95,0),24 }, + { IPv4(204,168,112,0),22 }, + { IPv4(204,168,113,0),24 }, + { IPv4(204,168,136,0),24 }, + { IPv4(204,168,150,0),24 }, + { IPv4(204,168,160,0),19 }, + { IPv4(204,169,0,0),16 }, + { IPv4(204,170,0,0),15 }, + { IPv4(204,170,22,0),24 }, + { IPv4(204,170,23,0),24 }, + { IPv4(204,170,37,0),24 }, + { IPv4(204,170,38,0),24 }, + { IPv4(204,171,184,0),24 }, + { IPv4(204,173,234,0),24 }, + { IPv4(204,174,101,0),24 }, + { IPv4(204,174,102,0),24 }, + { IPv4(204,174,112,0),24 }, + { IPv4(204,174,204,0),23 }, + { IPv4(204,174,223,0),24 }, + { IPv4(204,176,148,0),23 }, + { IPv4(204,176,177,0),24 }, + { IPv4(204,177,32,0),19 }, + { IPv4(204,177,80,0),21 }, + { IPv4(204,177,92,0),24 }, + { IPv4(204,177,93,0),24 }, + { IPv4(204,177,154,0),23 }, + { IPv4(204,178,176,0),23 }, + { IPv4(204,179,176,0),21 }, + { IPv4(204,179,240,0),24 }, + { IPv4(204,180,0,0),20 }, + { IPv4(204,180,230,0),23 }, + { IPv4(204,181,37,0),24 }, + { IPv4(204,181,116,0),24 }, + { IPv4(204,181,147,0),24 }, + { IPv4(204,181,149,0),24 }, + { IPv4(204,182,55,0),24 }, + { IPv4(204,182,56,0),24 }, + { IPv4(204,182,64,0),18 }, + { IPv4(204,182,144,0),24 }, + { IPv4(204,182,232,0),21 }, + { IPv4(204,183,80,0),20 }, + { IPv4(204,183,192,0),20 }, + { IPv4(204,183,205,0),24 }, + { IPv4(204,184,0,0),17 }, + { IPv4(204,184,128,0),18 }, + { IPv4(204,184,192,0),18 }, + { IPv4(204,185,0,0),19 }, + { IPv4(204,185,32,0),19 }, + { IPv4(204,185,64,0),18 }, + { IPv4(204,185,128,0),17 }, + { IPv4(204,186,0,0),16 }, + { IPv4(204,187,39,0),24 }, + { IPv4(204,187,48,0),24 }, + { IPv4(204,187,62,0),23 }, + { IPv4(204,187,65,0),24 }, + { IPv4(204,187,78,0),24 }, + { IPv4(204,187,87,0),24 }, + { IPv4(204,187,88,0),24 }, + { IPv4(204,187,89,0),24 }, + { IPv4(204,187,103,0),24 }, + { IPv4(204,187,104,0),24 }, + { IPv4(204,187,105,0),24 }, + { IPv4(204,187,133,0),24 }, + { IPv4(204,187,136,0),24 }, + { IPv4(204,187,138,0),24 }, + { IPv4(204,187,152,0),24 }, + { IPv4(204,189,34,0),24 }, + { IPv4(204,189,82,0),24 }, + { IPv4(204,189,94,0),23 }, + { IPv4(204,192,115,0),24 }, + { IPv4(204,192,127,0),24 }, + { IPv4(204,193,128,0),19 }, + { IPv4(204,193,140,0),22 }, + { IPv4(204,193,152,0),21 }, + { IPv4(204,194,14,0),24 }, + { IPv4(204,194,28,0),22 }, + { IPv4(204,194,64,0),21 }, + { IPv4(204,194,104,0),23 }, + { IPv4(204,194,106,0),23 }, + { IPv4(204,194,108,0),23 }, + { IPv4(204,194,176,0),21 }, + { IPv4(204,198,72,0),22 }, + { IPv4(204,198,76,0),23 }, + { IPv4(204,198,148,0),23 }, + { IPv4(204,198,249,0),24 }, + { IPv4(204,198,250,0),24 }, + { IPv4(204,200,0,0),14 }, + { IPv4(204,200,26,0),23 }, + { IPv4(204,200,103,0),24 }, + { IPv4(204,200,104,0),24 }, + { IPv4(204,200,106,0),24 }, + { IPv4(204,200,108,0),24 }, + { IPv4(204,200,120,0),24 }, + { IPv4(204,200,122,0),23 }, + { IPv4(204,200,130,0),23 }, + { IPv4(204,200,132,0),23 }, + { IPv4(204,200,134,0),24 }, + { IPv4(204,201,25,0),24 }, + { IPv4(204,201,36,0),22 }, + { IPv4(204,201,36,0),24 }, + { IPv4(204,201,37,0),24 }, + { IPv4(204,201,38,0),23 }, + { IPv4(204,201,38,0),24 }, + { IPv4(204,201,39,0),24 }, + { IPv4(204,201,232,0),21 }, + { IPv4(204,201,240,0),20 }, + { IPv4(204,203,20,0),24 }, + { IPv4(204,203,20,0),22 }, + { IPv4(204,203,21,0),24 }, + { IPv4(204,203,22,0),23 }, + { IPv4(204,203,22,0),24 }, + { IPv4(204,203,23,0),24 }, + { IPv4(204,203,32,0),24 }, + { IPv4(204,203,33,0),24 }, + { IPv4(204,203,48,0),23 }, + { IPv4(204,203,50,0),24 }, + { IPv4(204,208,0,0),16 }, + { IPv4(204,208,1,0),24 }, + { IPv4(204,208,2,0),23 }, + { IPv4(204,208,4,0),22 }, + { IPv4(204,208,8,0),21 }, + 
{ IPv4(204,208,16,0),22 }, + { IPv4(204,208,28,0),22 }, + { IPv4(204,208,29,0),24 }, + { IPv4(204,208,31,0),24 }, + { IPv4(204,208,32,0),24 }, + { IPv4(204,208,33,0),24 }, + { IPv4(204,208,35,0),24 }, + { IPv4(204,208,37,0),24 }, + { IPv4(204,208,38,0),24 }, + { IPv4(204,208,40,0),24 }, + { IPv4(204,208,42,0),24 }, + { IPv4(204,208,57,0),24 }, + { IPv4(204,208,80,0),20 }, + { IPv4(204,208,94,0),24 }, + { IPv4(204,208,115,0),24 }, + { IPv4(204,208,168,0),24 }, + { IPv4(204,208,192,0),24 }, + { IPv4(204,208,193,0),24 }, + { IPv4(204,208,194,0),24 }, + { IPv4(204,208,195,0),24 }, + { IPv4(204,208,196,0),24 }, + { IPv4(204,208,197,0),24 }, + { IPv4(204,208,198,0),24 }, + { IPv4(204,208,199,0),24 }, + { IPv4(204,208,203,0),24 }, + { IPv4(204,208,206,0),24 }, + { IPv4(204,208,207,0),24 }, + { IPv4(204,208,208,0),24 }, + { IPv4(204,208,209,0),24 }, + { IPv4(204,208,213,0),24 }, + { IPv4(204,208,215,0),24 }, + { IPv4(204,208,217,0),24 }, + { IPv4(204,208,218,0),24 }, + { IPv4(204,208,219,0),24 }, + { IPv4(204,208,220,0),24 }, + { IPv4(204,208,221,0),24 }, + { IPv4(204,208,222,0),24 }, + { IPv4(204,209,6,0),24 }, + { IPv4(204,209,13,0),24 }, + { IPv4(204,209,14,0),23 }, + { IPv4(204,209,44,0),24 }, + { IPv4(204,209,45,0),24 }, + { IPv4(204,209,52,0),24 }, + { IPv4(204,209,53,0),24 }, + { IPv4(204,209,54,0),24 }, + { IPv4(204,209,55,0),24 }, + { IPv4(204,209,114,0),24 }, + { IPv4(204,209,115,0),24 }, + { IPv4(204,209,136,0),24 }, + { IPv4(204,209,148,0),24 }, + { IPv4(204,209,158,0),24 }, + { IPv4(204,209,186,0),24 }, + { IPv4(204,209,208,0),21 }, + { IPv4(204,210,0,0),20 }, + { IPv4(204,210,16,0),20 }, + { IPv4(204,210,32,0),20 }, + { IPv4(204,210,48,0),20 }, + { IPv4(204,210,64,0),19 }, + { IPv4(204,210,96,0),19 }, + { IPv4(204,210,128,0),19 }, + { IPv4(204,210,160,0),20 }, + { IPv4(204,210,176,0),20 }, + { IPv4(204,210,192,0),19 }, + { IPv4(204,210,224,0),20 }, + { IPv4(204,210,240,0),21 }, + { IPv4(204,210,248,0),22 }, + { IPv4(204,210,252,0),24 }, + { IPv4(204,212,128,0),19 }, + { IPv4(204,212,160,0),22 }, + { IPv4(204,212,161,0),24 }, + { IPv4(204,212,163,0),24 }, + { IPv4(204,213,88,0),23 }, + { IPv4(204,213,90,0),23 }, + { IPv4(204,213,94,0),23 }, + { IPv4(204,213,96,0),20 }, + { IPv4(204,213,176,0),20 }, + { IPv4(204,214,7,0),24 }, + { IPv4(204,214,144,0),20 }, + { IPv4(204,216,0,0),17 }, + { IPv4(204,216,17,0),24 }, + { IPv4(204,216,18,0),24 }, + { IPv4(204,216,19,0),24 }, + { IPv4(204,216,20,0),24 }, + { IPv4(204,216,97,0),24 }, + { IPv4(204,216,101,0),24 }, + { IPv4(204,216,102,0),24 }, + { IPv4(204,216,103,0),24 }, + { IPv4(204,216,128,0),17 }, + { IPv4(204,218,0,0),15 }, + { IPv4(204,219,0,0),17 }, + { IPv4(204,220,0,0),15 }, + { IPv4(204,220,39,0),24 }, + { IPv4(204,220,64,0),18 }, + { IPv4(204,220,128,0),22 }, + { IPv4(204,220,160,0),19 }, + { IPv4(204,220,179,0),24 }, + { IPv4(204,220,181,0),24 }, + { IPv4(204,220,182,0),24 }, + { IPv4(204,220,183,0),24 }, + { IPv4(204,220,184,0),24 }, + { IPv4(204,220,188,0),24 }, + { IPv4(204,220,189,0),24 }, + { IPv4(204,220,190,0),24 }, + { IPv4(204,220,192,0),21 }, + { IPv4(204,220,200,0),21 }, + { IPv4(204,220,208,0),21 }, + { IPv4(204,220,216,0),21 }, + { IPv4(204,221,36,0),22 }, + { IPv4(204,221,76,0),24 }, + { IPv4(204,221,240,0),21 }, + { IPv4(204,222,0,0),18 }, + { IPv4(204,222,0,0),23 }, + { IPv4(204,222,2,0),24 }, + { IPv4(204,222,6,0),23 }, + { IPv4(204,222,8,0),23 }, + { IPv4(204,222,20,0),22 }, + { IPv4(204,222,24,0),24 }, + { IPv4(204,222,25,0),24 }, + { IPv4(204,222,26,0),24 }, + { IPv4(204,222,27,0),24 }, + { IPv4(204,222,32,0),24 
}, + { IPv4(204,222,33,0),24 }, + { IPv4(204,222,34,0),24 }, + { IPv4(204,222,35,0),24 }, + { IPv4(204,222,36,0),24 }, + { IPv4(204,222,37,0),24 }, + { IPv4(204,222,38,0),24 }, + { IPv4(204,222,39,0),24 }, + { IPv4(204,222,40,0),24 }, + { IPv4(204,222,42,0),24 }, + { IPv4(204,222,43,0),24 }, + { IPv4(204,222,44,0),24 }, + { IPv4(204,222,45,0),24 }, + { IPv4(204,222,46,0),24 }, + { IPv4(204,222,47,0),24 }, + { IPv4(204,222,64,0),21 }, + { IPv4(204,222,72,0),22 }, + { IPv4(204,222,76,0),24 }, + { IPv4(204,222,77,0),24 }, + { IPv4(204,222,80,0),21 }, + { IPv4(204,222,88,0),22 }, + { IPv4(204,222,96,0),19 }, + { IPv4(204,222,142,0),23 }, + { IPv4(204,222,143,0),24 }, + { IPv4(204,222,144,0),23 }, + { IPv4(204,222,144,0),20 }, + { IPv4(204,222,146,0),24 }, + { IPv4(204,222,149,0),24 }, + { IPv4(204,222,158,0),24 }, + { IPv4(204,222,159,0),24 }, + { IPv4(204,222,160,0),24 }, + { IPv4(204,222,160,0),19 }, + { IPv4(204,222,163,0),24 }, + { IPv4(204,222,167,0),24 }, + { IPv4(204,222,168,0),24 }, + { IPv4(204,222,169,0),24 }, + { IPv4(204,222,170,0),24 }, + { IPv4(204,222,173,0),24 }, + { IPv4(204,222,176,0),24 }, + { IPv4(204,222,177,0),24 }, + { IPv4(204,222,178,0),24 }, + { IPv4(204,222,179,0),24 }, + { IPv4(204,222,192,0),18 }, + { IPv4(204,222,214,0),23 }, + { IPv4(204,222,220,0),24 }, + { IPv4(204,222,221,0),24 }, + { IPv4(204,222,228,0),24 }, + { IPv4(204,222,229,0),24 }, + { IPv4(204,222,230,0),23 }, + { IPv4(204,222,232,0),21 }, + { IPv4(204,222,250,0),23 }, + { IPv4(204,222,252,0),23 }, + { IPv4(204,223,28,0),24 }, + { IPv4(204,223,30,0),23 }, + { IPv4(204,223,32,0),19 }, + { IPv4(204,223,64,0),18 }, + { IPv4(204,223,128,0),17 }, + { IPv4(204,225,32,0),24 }, + { IPv4(204,225,46,0),24 }, + { IPv4(204,225,47,0),24 }, + { IPv4(204,225,48,0),24 }, + { IPv4(204,225,60,0),22 }, + { IPv4(204,225,64,0),24 }, + { IPv4(204,225,84,0),22 }, + { IPv4(204,225,119,0),24 }, + { IPv4(204,225,134,0),24 }, + { IPv4(204,225,139,0),24 }, + { IPv4(204,225,140,0),24 }, + { IPv4(204,225,141,0),24 }, + { IPv4(204,225,144,0),24 }, + { IPv4(204,225,145,0),24 }, + { IPv4(204,225,156,0),24 }, + { IPv4(204,225,163,0),24 }, + { IPv4(204,225,176,0),24 }, + { IPv4(204,225,177,0),24 }, + { IPv4(204,225,186,0),24 }, + { IPv4(204,225,188,0),23 }, + { IPv4(204,225,218,0),24 }, + { IPv4(204,227,128,0),19 }, + { IPv4(204,227,160,0),19 }, + { IPv4(204,227,161,0),24 }, + { IPv4(204,227,174,0),24 }, + { IPv4(204,228,8,0),21 }, + { IPv4(204,228,21,0),24 }, + { IPv4(204,228,22,0),23 }, + { IPv4(204,228,24,0),21 }, + { IPv4(204,228,27,0),24 }, + { IPv4(204,228,28,0),24 }, + { IPv4(204,228,29,0),24 }, + { IPv4(204,228,64,0),24 }, + { IPv4(204,228,64,0),18 }, + { IPv4(204,228,67,0),24 }, + { IPv4(204,228,68,0),24 }, + { IPv4(204,228,69,0),24 }, + { IPv4(204,228,71,0),24 }, + { IPv4(204,228,78,0),24 }, + { IPv4(204,228,80,0),24 }, + { IPv4(204,228,82,0),24 }, + { IPv4(204,228,89,0),24 }, + { IPv4(204,228,128,0),19 }, + { IPv4(204,228,192,0),22 }, + { IPv4(204,228,203,0),24 }, + { IPv4(204,228,204,0),24 }, + { IPv4(204,228,208,0),23 }, + { IPv4(204,228,210,0),23 }, + { IPv4(204,228,212,0),24 }, + { IPv4(204,229,0,0),18 }, + { IPv4(204,229,36,0),24 }, + { IPv4(204,229,39,0),24 }, + { IPv4(204,229,40,0),24 }, + { IPv4(204,229,41,0),24 }, + { IPv4(204,229,42,0),23 }, + { IPv4(204,229,44,0),23 }, + { IPv4(204,229,182,0),24 }, + { IPv4(204,229,192,0),18 }, + { IPv4(204,229,192,0),21 }, + { IPv4(204,229,200,0),24 }, + { IPv4(204,229,201,0),24 }, + { IPv4(204,229,204,0),22 }, + { IPv4(204,229,219,0),24 }, + { IPv4(204,229,220,0),22 }, + { 
IPv4(204,229,224,0),22 }, + { IPv4(204,229,234,0),24 }, + { IPv4(204,229,236,0),22 }, + { IPv4(204,231,97,0),24 }, + { IPv4(204,231,110,0),23 }, + { IPv4(204,231,238,0),24 }, + { IPv4(204,233,0,0),16 }, + { IPv4(204,233,170,0),24 }, + { IPv4(204,233,172,0),22 }, + { IPv4(204,235,32,0),21 }, + { IPv4(204,235,40,0),22 }, + { IPv4(204,235,80,0),20 }, + { IPv4(204,235,196,0),24 }, + { IPv4(204,235,224,0),20 }, + { IPv4(204,235,245,0),24 }, + { IPv4(204,238,10,0),24 }, + { IPv4(204,238,15,0),24 }, + { IPv4(204,238,18,0),24 }, + { IPv4(204,238,23,0),24 }, + { IPv4(204,238,24,0),23 }, + { IPv4(204,238,26,0),24 }, + { IPv4(204,238,32,0),24 }, + { IPv4(204,238,37,0),24 }, + { IPv4(204,238,56,0),24 }, + { IPv4(204,238,98,0),24 }, + { IPv4(204,238,107,0),24 }, + { IPv4(204,238,120,0),24 }, + { IPv4(204,238,126,0),24 }, + { IPv4(204,238,129,0),24 }, + { IPv4(204,238,141,0),24 }, + { IPv4(204,238,143,0),24 }, + { IPv4(204,238,151,0),24 }, + { IPv4(204,238,153,0),24 }, + { IPv4(204,238,202,0),24 }, + { IPv4(204,238,211,0),24 }, + { IPv4(204,238,213,0),24 }, + { IPv4(204,238,217,0),24 }, + { IPv4(204,238,232,0),24 }, + { IPv4(204,238,237,0),24 }, + { IPv4(204,239,68,0),24 }, + { IPv4(204,239,123,0),24 }, + { IPv4(204,239,136,0),23 }, + { IPv4(204,239,179,0),24 }, + { IPv4(204,239,214,0),24 }, + { IPv4(204,244,0,0),16 }, + { IPv4(204,244,24,0),21 }, + { IPv4(204,245,128,0),17 }, + { IPv4(204,246,64,0),18 }, + { IPv4(204,246,128,0),20 }, + { IPv4(204,246,144,0),21 }, + { IPv4(204,246,147,0),24 }, + { IPv4(204,247,0,0),16 }, + { IPv4(204,248,29,0),24 }, + { IPv4(204,248,30,0),24 }, + { IPv4(204,248,128,0),20 }, + { IPv4(204,248,175,0),24 }, + { IPv4(204,248,192,0),21 }, + { IPv4(204,248,220,0),24 }, + { IPv4(204,248,221,0),24 }, + { IPv4(204,248,222,0),24 }, + { IPv4(204,248,223,0),24 }, + { IPv4(204,249,48,0),20 }, + { IPv4(204,249,49,0),24 }, + { IPv4(204,249,50,0),24 }, + { IPv4(204,249,51,0),24 }, + { IPv4(204,249,58,0),24 }, + { IPv4(204,249,62,0),24 }, + { IPv4(204,249,63,0),24 }, + { IPv4(204,249,74,0),24 }, + { IPv4(204,249,160,0),22 }, + { IPv4(204,249,232,0),24 }, + { IPv4(204,249,233,0),24 }, + { IPv4(204,250,96,0),20 }, + { IPv4(204,250,125,0),24 }, + { IPv4(204,250,126,0),24 }, + { IPv4(204,250,155,0),24 }, + { IPv4(204,250,160,0),19 }, + { IPv4(204,251,64,0),21 }, + { IPv4(204,251,168,0),22 }, + { IPv4(204,251,188,0),22 }, + { IPv4(204,251,189,0),24 }, + { IPv4(204,252,0,0),22 }, + { IPv4(204,252,74,0),24 }, + { IPv4(204,252,112,0),20 }, + { IPv4(204,252,224,0),20 }, + { IPv4(204,253,8,0),21 }, + { IPv4(204,253,8,0),24 }, + { IPv4(204,253,9,0),24 }, + { IPv4(204,253,10,0),24 }, + { IPv4(204,253,11,0),24 }, + { IPv4(204,253,12,0),24 }, + { IPv4(204,253,13,0),24 }, + { IPv4(204,253,14,0),24 }, + { IPv4(204,253,15,0),24 }, + { IPv4(204,253,83,0),24 }, + { IPv4(204,253,128,0),22 }, + { IPv4(204,253,151,0),24 }, + { IPv4(204,253,168,0),21 }, + { IPv4(204,254,32,0),24 }, + { IPv4(204,254,60,0),24 }, + { IPv4(204,254,61,0),24 }, + { IPv4(204,254,94,0),24 }, + { IPv4(204,254,120,0),21 }, + { IPv4(204,254,168,0),24 }, + { IPv4(204,254,170,0),24 }, + { IPv4(204,254,224,0),21 }, + { IPv4(204,255,32,0),24 }, + { IPv4(204,255,34,0),24 }, + { IPv4(204,255,42,0),24 }, + { IPv4(204,255,43,0),24 }, + { IPv4(204,255,44,0),24 }, + { IPv4(204,255,45,0),24 }, + { IPv4(204,255,50,0),24 }, + { IPv4(204,255,51,0),24 }, + { IPv4(204,255,56,0),24 }, + { IPv4(204,255,57,0),24 }, + { IPv4(204,255,177,0),24 }, + { IPv4(204,255,200,0),21 }, + { IPv4(204,255,224,0),20 }, + { IPv4(204,255,244,0),23 }, + { IPv4(205,56,0,0),13 
}, + { IPv4(205,56,144,0),24 }, + { IPv4(205,56,145,0),24 }, + { IPv4(205,56,150,0),24 }, + { IPv4(205,57,192,0),22 }, + { IPv4(205,57,196,0),24 }, + { IPv4(205,62,14,0),24 }, + { IPv4(205,64,0,0),11 }, + { IPv4(205,65,129,0),24 }, + { IPv4(205,66,84,0),24 }, + { IPv4(205,66,100,0),22 }, + { IPv4(205,66,105,0),24 }, + { IPv4(205,66,107,0),24 }, + { IPv4(205,66,110,0),24 }, + { IPv4(205,66,111,0),24 }, + { IPv4(205,66,112,0),24 }, + { IPv4(205,66,113,0),24 }, + { IPv4(205,66,118,0),24 }, + { IPv4(205,66,240,0),24 }, + { IPv4(205,67,206,0),24 }, + { IPv4(205,67,207,0),24 }, + { IPv4(205,67,218,0),24 }, + { IPv4(205,67,223,0),24 }, + { IPv4(205,67,231,0),24 }, + { IPv4(205,67,232,0),24 }, + { IPv4(205,67,252,0),24 }, + { IPv4(205,67,255,0),24 }, + { IPv4(205,68,66,0),24 }, + { IPv4(205,68,69,0),24 }, + { IPv4(205,68,76,0),24 }, + { IPv4(205,68,89,0),24 }, + { IPv4(205,68,90,0),24 }, + { IPv4(205,68,93,0),24 }, + { IPv4(205,68,94,0),24 }, + { IPv4(205,68,95,0),24 }, + { IPv4(205,68,103,0),24 }, + { IPv4(205,69,124,0),24 }, + { IPv4(205,69,192,0),20 }, + { IPv4(205,69,208,0),21 }, + { IPv4(205,69,221,0),24 }, + { IPv4(205,69,224,0),24 }, + { IPv4(205,69,225,0),24 }, + { IPv4(205,69,226,0),24 }, + { IPv4(205,69,227,0),24 }, + { IPv4(205,69,228,0),24 }, + { IPv4(205,70,64,0),24 }, + { IPv4(205,70,65,0),24 }, + { IPv4(205,70,67,0),24 }, + { IPv4(205,70,96,0),21 }, + { IPv4(205,70,104,0),22 }, + { IPv4(205,70,108,0),23 }, + { IPv4(205,76,0,0),24 }, + { IPv4(205,76,1,0),24 }, + { IPv4(205,76,6,0),24 }, + { IPv4(205,76,7,0),24 }, + { IPv4(205,76,8,0),24 }, + { IPv4(205,76,9,0),24 }, + { IPv4(205,76,10,0),24 }, + { IPv4(205,76,11,0),24 }, + { IPv4(205,76,12,0),24 }, + { IPv4(205,76,13,0),24 }, + { IPv4(205,89,128,0),24 }, + { IPv4(205,94,129,0),24 }, + { IPv4(205,94,130,0),24 }, + { IPv4(205,94,131,0),24 }, + { IPv4(205,94,132,0),24 }, + { IPv4(205,96,0,0),13 }, + { IPv4(205,101,96,0),24 }, + { IPv4(205,101,97,0),24 }, + { IPv4(205,101,98,0),24 }, + { IPv4(205,101,99,0),24 }, + { IPv4(205,101,100,0),24 }, + { IPv4(205,101,224,0),24 }, + { IPv4(205,102,128,0),24 }, + { IPv4(205,102,129,0),24 }, + { IPv4(205,103,84,0),24 }, + { IPv4(205,104,0,0),15 }, + { IPv4(205,106,0,0),15 }, + { IPv4(205,106,16,0),24 }, + { IPv4(205,106,75,0),24 }, + { IPv4(205,106,220,0),24 }, + { IPv4(205,107,0,0),17 }, + { IPv4(205,107,192,0),19 }, + { IPv4(205,108,0,0),15 }, + { IPv4(205,108,36,0),24 }, + { IPv4(205,109,23,0),24 }, + { IPv4(205,109,24,0),24 }, + { IPv4(205,109,56,0),21 }, + { IPv4(205,109,64,0),22 }, + { IPv4(205,109,192,0),24 }, + { IPv4(205,109,224,0),19 }, + { IPv4(205,110,0,0),24 }, + { IPv4(205,110,0,0),16 }, + { IPv4(205,110,1,0),24 }, + { IPv4(205,110,2,0),24 }, + { IPv4(205,110,3,0),24 }, + { IPv4(205,110,4,0),24 }, + { IPv4(205,110,5,0),24 }, + { IPv4(205,110,6,0),24 }, + { IPv4(205,110,7,0),24 }, + { IPv4(205,110,8,0),24 }, + { IPv4(205,110,9,0),24 }, + { IPv4(205,110,10,0),24 }, + { IPv4(205,110,11,0),24 }, + { IPv4(205,110,12,0),24 }, + { IPv4(205,110,13,0),24 }, + { IPv4(205,110,14,0),24 }, + { IPv4(205,110,15,0),24 }, + { IPv4(205,110,16,0),24 }, + { IPv4(205,110,17,0),24 }, + { IPv4(205,110,18,0),24 }, + { IPv4(205,110,19,0),24 }, + { IPv4(205,110,20,0),24 }, + { IPv4(205,110,21,0),24 }, + { IPv4(205,110,22,0),24 }, + { IPv4(205,110,23,0),24 }, + { IPv4(205,110,24,0),24 }, + { IPv4(205,110,25,0),24 }, + { IPv4(205,110,26,0),24 }, + { IPv4(205,110,27,0),24 }, + { IPv4(205,110,28,0),24 }, + { IPv4(205,110,29,0),24 }, + { IPv4(205,110,30,0),24 }, + { IPv4(205,110,31,0),24 }, + { 
IPv4(205,110,205,0),24 }, + { IPv4(205,110,206,0),24 }, + { IPv4(205,110,224,0),24 }, + { IPv4(205,110,225,0),24 }, + { IPv4(205,113,0,0),16 }, + { IPv4(205,115,0,0),16 }, + { IPv4(205,118,0,0),15 }, + { IPv4(205,120,0,0),13 }, + { IPv4(205,124,237,0),24 }, + { IPv4(205,124,245,0),24 }, + { IPv4(205,127,29,0),24 }, + { IPv4(205,127,253,0),24 }, + { IPv4(205,128,8,0),22 }, + { IPv4(205,132,8,0),24 }, + { IPv4(205,132,16,0),21 }, + { IPv4(205,132,73,0),24 }, + { IPv4(205,132,74,0),24 }, + { IPv4(205,132,75,0),24 }, + { IPv4(205,132,76,0),24 }, + { IPv4(205,132,82,0),24 }, + { IPv4(205,132,83,0),24 }, + { IPv4(205,132,173,0),24 }, + { IPv4(205,132,174,0),24 }, + { IPv4(205,132,175,0),24 }, + { IPv4(205,132,224,0),24 }, + { IPv4(205,132,225,0),24 }, + { IPv4(205,132,226,0),24 }, + { IPv4(205,132,227,0),24 }, + { IPv4(205,132,228,0),24 }, + { IPv4(205,132,229,0),24 }, + { IPv4(205,132,230,0),24 }, + { IPv4(205,132,231,0),24 }, + { IPv4(205,132,232,0),24 }, + { IPv4(205,132,233,0),24 }, + { IPv4(205,132,248,0),21 }, + { IPv4(205,136,35,0),24 }, + { IPv4(205,136,46,0),24 }, + { IPv4(205,136,49,0),24 }, + { IPv4(205,136,56,0),24 }, + { IPv4(205,136,60,0),24 }, + { IPv4(205,136,61,0),24 }, + { IPv4(205,136,119,0),24 }, + { IPv4(205,136,158,0),24 }, + { IPv4(205,136,164,0),22 }, + { IPv4(205,136,180,0),23 }, + { IPv4(205,136,182,0),23 }, + { IPv4(205,136,205,0),24 }, + { IPv4(205,136,213,0),24 }, + { IPv4(205,136,224,0),24 }, + { IPv4(205,136,246,0),23 }, + { IPv4(205,136,248,0),23 }, + { IPv4(205,137,96,0),24 }, + { IPv4(205,137,96,0),20 }, + { IPv4(205,137,176,0),20 }, + { IPv4(205,138,133,0),24 }, + { IPv4(205,138,134,0),24 }, + { IPv4(205,138,135,0),24 }, + { IPv4(205,138,136,0),24 }, + { IPv4(205,138,137,0),24 }, + { IPv4(205,138,138,0),24 }, + { IPv4(205,138,230,0),24 }, + { IPv4(205,139,0,0),23 }, + { IPv4(205,139,50,0),23 }, + { IPv4(205,139,96,0),23 }, + { IPv4(205,139,102,0),23 }, + { IPv4(205,139,106,0),23 }, + { IPv4(205,139,120,0),22 }, + { IPv4(205,139,124,0),24 }, + { IPv4(205,139,140,0),23 }, + { IPv4(205,139,189,0),24 }, + { IPv4(205,139,224,0),22 }, + { IPv4(205,140,14,0),23 }, + { IPv4(205,140,126,0),24 }, + { IPv4(205,140,164,0),22 }, + { IPv4(205,140,192,0),19 }, + { IPv4(205,141,128,0),18 }, + { IPv4(205,142,56,0),22 }, + { IPv4(205,142,80,0),22 }, + { IPv4(205,142,96,0),22 }, + { IPv4(205,142,108,0),23 }, + { IPv4(205,142,124,0),22 }, + { IPv4(205,142,149,0),24 }, + { IPv4(205,142,150,0),24 }, + { IPv4(205,142,151,0),24 }, + { IPv4(205,142,164,0),24 }, + { IPv4(205,142,176,0),24 }, + { IPv4(205,142,177,0),24 }, + { IPv4(205,142,188,0),22 }, + { IPv4(205,142,196,0),24 }, + { IPv4(205,142,197,0),24 }, + { IPv4(205,142,198,0),24 }, + { IPv4(205,142,199,0),24 }, + { IPv4(205,142,204,0),24 }, + { IPv4(205,142,205,0),24 }, + { IPv4(205,142,206,0),24 }, + { IPv4(205,142,207,0),24 }, + { IPv4(205,142,236,0),24 }, + { IPv4(205,142,237,0),24 }, + { IPv4(205,142,238,0),24 }, + { IPv4(205,142,239,0),24 }, + { IPv4(205,143,37,0),24 }, + { IPv4(205,143,50,0),23 }, + { IPv4(205,143,52,0),22 }, + { IPv4(205,143,64,0),21 }, + { IPv4(205,143,88,0),21 }, + { IPv4(205,143,100,0),23 }, + { IPv4(205,143,103,0),24 }, + { IPv4(205,143,192,0),21 }, + { IPv4(205,143,200,0),24 }, + { IPv4(205,143,201,0),24 }, + { IPv4(205,143,202,0),24 }, + { IPv4(205,143,203,0),24 }, + { IPv4(205,143,204,0),24 }, + { IPv4(205,143,205,0),24 }, + { IPv4(205,143,207,0),24 }, + { IPv4(205,143,208,0),21 }, + { IPv4(205,143,232,0),21 }, + { IPv4(205,143,248,0),21 }, + { IPv4(205,143,248,0),24 }, + { IPv4(205,143,249,0),24 }, 
+ { IPv4(205,143,250,0),24 }, + { IPv4(205,143,251,0),24 }, + { IPv4(205,143,252,0),24 }, + { IPv4(205,143,253,0),24 }, + { IPv4(205,143,254,0),24 }, + { IPv4(205,143,255,0),24 }, + { IPv4(205,144,99,0),24 }, + { IPv4(205,144,100,0),24 }, + { IPv4(205,144,101,0),24 }, + { IPv4(205,144,106,0),24 }, + { IPv4(205,144,113,0),24 }, + { IPv4(205,144,122,0),24 }, + { IPv4(205,144,123,0),24 }, + { IPv4(205,144,125,0),24 }, + { IPv4(205,144,126,0),24 }, + { IPv4(205,144,146,0),24 }, + { IPv4(205,144,222,0),24 }, + { IPv4(205,144,223,0),24 }, + { IPv4(205,144,225,0),24 }, + { IPv4(205,145,64,0),24 }, + { IPv4(205,145,102,0),24 }, + { IPv4(205,145,158,0),24 }, + { IPv4(205,145,161,0),24 }, + { IPv4(205,145,185,0),24 }, + { IPv4(205,145,186,0),24 }, + { IPv4(205,146,0,0),16 }, + { IPv4(205,146,78,0),23 }, + { IPv4(205,146,148,0),22 }, + { IPv4(205,146,152,0),24 }, + { IPv4(205,147,0,0),18 }, + { IPv4(205,147,128,0),19 }, + { IPv4(205,147,149,0),24 }, + { IPv4(205,147,160,0),19 }, + { IPv4(205,147,192,0),18 }, + { IPv4(205,148,0,0),18 }, + { IPv4(205,148,123,0),24 }, + { IPv4(205,148,125,0),24 }, + { IPv4(205,148,184,0),24 }, + { IPv4(205,148,192,0),18 }, + { IPv4(205,148,225,0),24 }, + { IPv4(205,148,233,0),24 }, + { IPv4(205,149,0,0),21 }, + { IPv4(205,149,120,0),22 }, + { IPv4(205,149,124,0),23 }, + { IPv4(205,149,160,0),19 }, + { IPv4(205,150,42,0),24 }, + { IPv4(205,150,88,0),24 }, + { IPv4(205,150,101,0),24 }, + { IPv4(205,150,136,0),24 }, + { IPv4(205,150,142,0),24 }, + { IPv4(205,150,203,0),24 }, + { IPv4(205,150,218,0),24 }, + { IPv4(205,150,247,0),24 }, + { IPv4(205,150,248,0),24 }, + { IPv4(205,151,82,0),24 }, + { IPv4(205,151,103,0),24 }, + { IPv4(205,151,126,0),23 }, + { IPv4(205,151,179,0),24 }, + { IPv4(205,151,192,0),20 }, + { IPv4(205,153,9,0),24 }, + { IPv4(205,153,10,0),24 }, + { IPv4(205,153,11,0),24 }, + { IPv4(205,153,47,0),24 }, + { IPv4(205,153,60,0),22 }, + { IPv4(205,153,68,0),22 }, + { IPv4(205,153,88,0),22 }, + { IPv4(205,153,196,0),22 }, + { IPv4(205,153,248,0),22 }, + { IPv4(205,154,0,0),16 }, + { IPv4(205,154,0,0),19 }, + { IPv4(205,154,32,0),20 }, + { IPv4(205,154,48,0),23 }, + { IPv4(205,154,160,0),19 }, + { IPv4(205,155,0,0),16 }, + { IPv4(205,156,177,0),24 }, + { IPv4(205,157,65,0),24 }, + { IPv4(205,157,69,0),24 }, + { IPv4(205,157,74,0),24 }, + { IPv4(205,157,85,0),24 }, + { IPv4(205,157,90,0),24 }, + { IPv4(205,157,102,0),24 }, + { IPv4(205,157,103,0),24 }, + { IPv4(205,157,104,0),24 }, + { IPv4(205,157,105,0),24 }, + { IPv4(205,157,128,0),20 }, + { IPv4(205,157,160,0),19 }, + { IPv4(205,158,0,0),16 }, + { IPv4(205,158,160,0),23 }, + { IPv4(205,158,184,0),24 }, + { IPv4(205,159,1,0),24 }, + { IPv4(205,159,16,0),24 }, + { IPv4(205,159,27,0),24 }, + { IPv4(205,159,28,0),24 }, + { IPv4(205,159,81,0),24 }, + { IPv4(205,159,83,0),24 }, + { IPv4(205,159,90,0),24 }, + { IPv4(205,159,126,0),24 }, + { IPv4(205,159,132,0),24 }, + { IPv4(205,159,147,0),24 }, + { IPv4(205,159,151,0),24 }, + { IPv4(205,159,154,0),24 }, + { IPv4(205,159,169,0),24 }, + { IPv4(205,159,173,0),24 }, + { IPv4(205,159,176,0),24 }, + { IPv4(205,159,191,0),24 }, + { IPv4(205,159,233,0),24 }, + { IPv4(205,159,238,0),24 }, + { IPv4(205,159,239,0),24 }, + { IPv4(205,159,248,0),24 }, + { IPv4(205,160,0,0),22 }, + { IPv4(205,160,4,0),23 }, + { IPv4(205,160,112,0),20 }, + { IPv4(205,160,214,0),24 }, + { IPv4(205,160,215,0),24 }, + { IPv4(205,160,216,0),22 }, + { IPv4(205,160,241,0),24 }, + { IPv4(205,161,205,0),24 }, + { IPv4(205,162,5,0),24 }, + { IPv4(205,162,7,0),24 }, + { IPv4(205,162,49,0),24 }, + { 
IPv4(205,162,54,0),24 }, + { IPv4(205,162,58,0),24 }, + { IPv4(205,162,59,0),24 }, + { IPv4(205,162,64,0),20 }, + { IPv4(205,162,124,0),22 }, + { IPv4(205,162,201,0),24 }, + { IPv4(205,162,202,0),24 }, + { IPv4(205,162,240,0),24 }, + { IPv4(205,162,240,0),20 }, + { IPv4(205,162,245,0),24 }, + { IPv4(205,162,246,0),24 }, + { IPv4(205,162,249,0),24 }, + { IPv4(205,162,250,0),24 }, + { IPv4(205,162,251,0),24 }, + { IPv4(205,162,252,0),24 }, + { IPv4(205,162,254,0),24 }, + { IPv4(205,163,0,0),19 }, + { IPv4(205,163,2,0),24 }, + { IPv4(205,163,3,0),24 }, + { IPv4(205,163,142,0),24 }, + { IPv4(205,164,216,0),23 }, + { IPv4(205,164,219,0),24 }, + { IPv4(205,166,4,0),24 }, + { IPv4(205,166,32,0),24 }, + { IPv4(205,166,33,0),24 }, + { IPv4(205,166,36,0),24 }, + { IPv4(205,166,39,0),24 }, + { IPv4(205,166,48,0),24 }, + { IPv4(205,166,62,0),24 }, + { IPv4(205,166,76,0),24 }, + { IPv4(205,166,82,0),24 }, + { IPv4(205,166,84,0),24 }, + { IPv4(205,166,92,0),24 }, + { IPv4(205,166,115,0),24 }, + { IPv4(205,166,121,0),24 }, + { IPv4(205,166,143,0),24 }, + { IPv4(205,166,146,0),24 }, + { IPv4(205,166,151,0),24 }, + { IPv4(205,166,165,0),24 }, + { IPv4(205,166,180,0),24 }, + { IPv4(205,166,195,0),24 }, + { IPv4(205,166,196,0),24 }, + { IPv4(205,166,214,0),24 }, + { IPv4(205,166,226,0),24 }, + { IPv4(205,166,230,0),24 }, + { IPv4(205,166,234,0),24 }, + { IPv4(205,166,249,0),24 }, + { IPv4(205,167,19,0),24 }, + { IPv4(205,167,22,0),23 }, + { IPv4(205,167,28,0),24 }, + { IPv4(205,167,29,0),24 }, + { IPv4(205,167,36,0),23 }, + { IPv4(205,167,46,0),23 }, + { IPv4(205,167,62,0),23 }, + { IPv4(205,167,68,0),23 }, + { IPv4(205,167,80,0),23 }, + { IPv4(205,167,88,0),24 }, + { IPv4(205,167,89,0),24 }, + { IPv4(205,167,90,0),23 }, + { IPv4(205,167,96,0),24 }, + { IPv4(205,167,108,0),24 }, + { IPv4(205,167,109,0),24 }, + { IPv4(205,167,110,0),24 }, + { IPv4(205,167,111,0),24 }, + { IPv4(205,167,118,0),24 }, + { IPv4(205,167,124,0),23 }, + { IPv4(205,167,128,0),23 }, + { IPv4(205,167,142,0),23 }, + { IPv4(205,167,150,0),23 }, + { IPv4(205,167,162,0),23 }, + { IPv4(205,167,162,0),24 }, + { IPv4(205,167,163,0),24 }, + { IPv4(205,167,174,0),24 }, + { IPv4(205,167,175,0),24 }, + { IPv4(205,167,184,0),23 }, + { IPv4(205,167,188,0),23 }, + { IPv4(205,167,198,0),24 }, + { IPv4(205,167,199,0),24 }, + { IPv4(205,168,0,0),15 }, + { IPv4(205,168,0,0),16 }, + { IPv4(205,168,70,0),24 }, + { IPv4(205,168,96,0),24 }, + { IPv4(205,168,175,0),24 }, + { IPv4(205,169,23,0),24 }, + { IPv4(205,169,24,0),22 }, + { IPv4(205,169,28,0),23 }, + { IPv4(205,169,30,0),24 }, + { IPv4(205,169,171,0),24 }, + { IPv4(205,170,0,0),20 }, + { IPv4(205,170,0,0),16 }, + { IPv4(205,170,168,0),21 }, + { IPv4(205,170,235,0),24 }, + { IPv4(205,170,240,0),24 }, + { IPv4(205,170,241,0),24 }, + { IPv4(205,170,242,0),24 }, + { IPv4(205,170,243,0),24 }, + { IPv4(205,171,64,0),21 }, + { IPv4(205,171,78,0),24 }, + { IPv4(205,171,120,0),21 }, + { IPv4(205,171,129,0),24 }, + { IPv4(205,171,202,0),24 }, + { IPv4(205,172,0,0),22 }, + { IPv4(205,172,8,0),22 }, + { IPv4(205,172,16,0),22 }, + { IPv4(205,172,139,0),24 }, + { IPv4(205,172,156,0),22 }, + { IPv4(205,172,164,0),24 }, + { IPv4(205,172,203,0),24 }, + { IPv4(205,172,212,0),22 }, + { IPv4(205,173,0,0),21 }, + { IPv4(205,173,32,0),21 }, + { IPv4(205,173,40,0),21 }, + { IPv4(205,173,93,0),24 }, + { IPv4(205,173,95,0),24 }, + { IPv4(205,173,129,0),24 }, + { IPv4(205,173,176,0),21 }, + { IPv4(205,173,240,0),24 }, + { IPv4(205,174,16,0),24 }, + { IPv4(205,174,21,0),24 }, + { IPv4(205,174,22,0),24 }, + { 
IPv4(205,174,23,0),24 }, + { IPv4(205,174,34,0),24 }, + { IPv4(205,174,40,0),24 }, + { IPv4(205,174,41,0),24 }, + { IPv4(205,174,42,0),24 }, + { IPv4(205,174,47,0),24 }, + { IPv4(205,174,64,0),20 }, + { IPv4(205,174,159,0),24 }, + { IPv4(205,174,208,0),20 }, + { IPv4(205,174,240,0),20 }, + { IPv4(205,175,0,0),19 }, + { IPv4(205,175,208,0),24 }, + { IPv4(205,175,224,0),24 }, + { IPv4(205,175,252,0),24 }, + { IPv4(205,177,14,0),24 }, + { IPv4(205,177,62,0),24 }, + { IPv4(205,177,84,0),23 }, + { IPv4(205,177,116,0),22 }, + { IPv4(205,177,140,0),22 }, + { IPv4(205,177,144,0),20 }, + { IPv4(205,177,172,0),24 }, + { IPv4(205,178,0,0),17 }, + { IPv4(205,178,26,0),24 }, + { IPv4(205,178,38,0),23 }, + { IPv4(205,178,41,0),24 }, + { IPv4(205,178,52,0),24 }, + { IPv4(205,178,61,0),24 }, + { IPv4(205,178,85,0),24 }, + { IPv4(205,178,86,0),24 }, + { IPv4(205,178,87,0),24 }, + { IPv4(205,178,93,0),24 }, + { IPv4(205,178,95,0),24 }, + { IPv4(205,178,118,0),24 }, + { IPv4(205,178,123,0),24 }, + { IPv4(205,178,125,0),24 }, + { IPv4(205,180,10,0),24 }, + { IPv4(205,180,15,0),24 }, + { IPv4(205,180,85,0),24 }, + { IPv4(205,180,86,0),24 }, + { IPv4(205,180,87,0),24 }, + { IPv4(205,180,192,0),24 }, + { IPv4(205,180,193,0),24 }, + { IPv4(205,181,72,0),24 }, + { IPv4(205,181,180,0),24 }, + { IPv4(205,181,181,0),24 }, + { IPv4(205,181,240,0),24 }, + { IPv4(205,181,242,0),24 }, + { IPv4(205,184,0,0),14 }, + { IPv4(205,184,3,0),24 }, + { IPv4(205,184,14,0),24 }, + { IPv4(205,184,38,0),24 }, + { IPv4(205,184,62,0),24 }, + { IPv4(205,184,128,0),23 }, + { IPv4(205,184,138,0),24 }, + { IPv4(205,184,151,0),24 }, + { IPv4(205,184,204,0),23 }, + { IPv4(205,184,218,0),23 }, + { IPv4(205,184,238,0),24 }, + { IPv4(205,184,240,0),24 }, + { IPv4(205,185,12,0),24 }, + { IPv4(205,185,14,0),24 }, + { IPv4(205,185,17,0),24 }, + { IPv4(205,185,18,0),24 }, + { IPv4(205,185,19,0),24 }, + { IPv4(205,185,21,0),24 }, + { IPv4(205,185,22,0),24 }, + { IPv4(205,185,24,0),24 }, + { IPv4(205,185,25,0),24 }, + { IPv4(205,185,119,0),24 }, + { IPv4(205,186,39,0),24 }, + { IPv4(205,186,155,0),24 }, + { IPv4(205,187,75,0),24 }, + { IPv4(205,187,207,0),24 }, + { IPv4(205,187,221,0),24 }, + { IPv4(205,187,228,0),24 }, + { IPv4(205,188,0,0),16 }, + { IPv4(205,188,64,0),18 }, + { IPv4(205,189,1,0),24 }, + { IPv4(205,189,39,0),24 }, + { IPv4(205,189,40,0),24 }, + { IPv4(205,189,51,0),24 }, + { IPv4(205,189,71,0),24 }, + { IPv4(205,189,72,0),23 }, + { IPv4(205,189,86,0),23 }, + { IPv4(205,189,108,0),24 }, + { IPv4(205,189,134,0),24 }, + { IPv4(205,189,139,0),24 }, + { IPv4(205,189,151,0),24 }, + { IPv4(205,189,152,0),24 }, + { IPv4(205,189,204,0),24 }, + { IPv4(205,190,14,0),24 }, + { IPv4(205,191,64,0),24 }, + { IPv4(205,191,128,0),23 }, + { IPv4(205,191,166,0),24 }, + { IPv4(205,191,176,0),24 }, + { IPv4(205,191,194,0),24 }, + { IPv4(205,193,0,0),16 }, + { IPv4(205,198,244,0),24 }, + { IPv4(205,199,135,0),24 }, + { IPv4(205,199,148,0),22 }, + { IPv4(205,199,200,0),23 }, + { IPv4(205,199,232,0),22 }, + { IPv4(205,201,0,0),18 }, + { IPv4(205,202,96,0),19 }, + { IPv4(205,202,192,0),18 }, + { IPv4(205,203,64,0),19 }, + { IPv4(205,203,224,0),19 }, + { IPv4(205,205,19,0),24 }, + { IPv4(205,205,56,0),24 }, + { IPv4(205,205,98,0),24 }, + { IPv4(205,205,149,0),24 }, + { IPv4(205,207,64,0),24 }, + { IPv4(205,207,69,0),24 }, + { IPv4(205,207,98,0),24 }, + { IPv4(205,207,136,0),24 }, + { IPv4(205,207,137,0),24 }, + { IPv4(205,207,138,0),24 }, + { IPv4(205,207,175,0),24 }, + { IPv4(205,207,184,0),24 }, + { IPv4(205,207,185,0),24 }, + { IPv4(205,207,188,0),24 }, + 
{ IPv4(205,207,214,0),24 }, + { IPv4(205,207,243,0),24 }, + { IPv4(205,208,148,0),24 }, + { IPv4(205,210,28,0),24 }, + { IPv4(205,210,42,0),24 }, + { IPv4(205,210,85,0),24 }, + { IPv4(205,210,86,0),23 }, + { IPv4(205,210,88,0),21 }, + { IPv4(205,210,104,0),24 }, + { IPv4(205,210,137,0),24 }, + { IPv4(205,210,138,0),24 }, + { IPv4(205,210,141,0),24 }, + { IPv4(205,210,144,0),24 }, + { IPv4(205,210,145,0),24 }, + { IPv4(205,210,147,0),24 }, + { IPv4(205,210,148,0),24 }, + { IPv4(205,210,184,0),24 }, + { IPv4(205,210,218,0),24 }, + { IPv4(205,210,220,0),24 }, + { IPv4(205,210,221,0),24 }, + { IPv4(205,210,222,0),24 }, + { IPv4(205,210,223,0),24 }, + { IPv4(205,210,228,0),24 }, + { IPv4(205,210,229,0),24 }, + { IPv4(205,210,230,0),24 }, + { IPv4(205,210,231,0),24 }, + { IPv4(205,210,248,0),24 }, + { IPv4(205,211,11,0),24 }, + { IPv4(205,211,16,0),21 }, + { IPv4(205,211,140,0),22 }, + { IPv4(205,211,144,0),23 }, + { IPv4(205,211,164,0),24 }, + { IPv4(205,211,165,0),24 }, + { IPv4(205,211,168,0),24 }, + { IPv4(205,211,169,0),24 }, + { IPv4(205,212,0,0),16 }, + { IPv4(205,212,112,0),20 }, + { IPv4(205,212,144,0),21 }, + { IPv4(205,212,152,0),21 }, + { IPv4(205,214,42,0),24 }, + { IPv4(205,214,160,0),20 }, + { IPv4(205,215,0,0),18 }, + { IPv4(205,215,11,0),24 }, + { IPv4(205,215,14,0),24 }, + { IPv4(205,215,29,0),24 }, + { IPv4(205,215,33,0),24 }, + { IPv4(205,215,37,0),24 }, + { IPv4(205,215,38,0),24 }, + { IPv4(205,215,51,0),24 }, + { IPv4(205,215,52,0),24 }, + { IPv4(205,215,56,0),24 }, + { IPv4(205,215,57,0),24 }, + { IPv4(205,215,58,0),24 }, + { IPv4(205,215,62,0),24 }, + { IPv4(205,215,128,0),18 }, + { IPv4(205,215,192,0),19 }, + { IPv4(205,215,210,0),24 }, + { IPv4(205,215,211,0),24 }, + { IPv4(205,215,212,0),24 }, + { IPv4(205,215,216,0),24 }, + { IPv4(205,215,232,0),23 }, + { IPv4(205,216,147,0),24 }, + { IPv4(205,217,32,0),20 }, + { IPv4(205,217,104,0),24 }, + { IPv4(205,217,201,0),24 }, + { IPv4(205,217,216,0),24 }, + { IPv4(205,217,220,0),22 }, + { IPv4(205,217,224,0),19 }, + { IPv4(205,218,108,0),23 }, + { IPv4(205,218,118,0),23 }, + { IPv4(205,218,156,0),22 }, + { IPv4(205,218,186,0),24 }, + { IPv4(205,219,64,0),19 }, + { IPv4(205,219,120,0),23 }, + { IPv4(205,219,138,0),23 }, + { IPv4(205,219,141,0),24 }, + { IPv4(205,219,162,0),24 }, + { IPv4(205,219,188,0),24 }, + { IPv4(205,219,188,0),23 }, + { IPv4(205,219,198,0),23 }, + { IPv4(205,219,208,0),24 }, + { IPv4(205,219,209,0),24 }, + { IPv4(205,219,212,0),24 }, + { IPv4(205,220,0,0),17 }, + { IPv4(205,221,0,0),16 }, + { IPv4(205,221,176,0),21 }, + { IPv4(205,221,208,0),21 }, + { IPv4(205,222,0,0),16 }, + { IPv4(205,223,126,0),24 }, + { IPv4(205,223,128,0),24 }, + { IPv4(205,226,57,0),24 }, + { IPv4(205,227,180,0),24 }, + { IPv4(205,227,181,0),24 }, + { IPv4(205,227,182,0),24 }, + { IPv4(205,227,183,0),24 }, + { IPv4(205,227,204,0),24 }, + { IPv4(205,228,0,0),18 }, + { IPv4(205,228,240,0),21 }, + { IPv4(205,228,245,0),24 }, + { IPv4(205,229,32,0),20 }, + { IPv4(205,229,250,0),24 }, + { IPv4(205,230,16,0),21 }, + { IPv4(205,230,25,0),24 }, + { IPv4(205,230,56,0),24 }, + { IPv4(205,230,57,0),24 }, + { IPv4(205,231,44,0),24 }, + { IPv4(205,231,82,0),23 }, + { IPv4(205,231,214,0),24 }, + { IPv4(205,232,0,0),16 }, + { IPv4(205,232,18,0),23 }, + { IPv4(205,232,37,0),24 }, + { IPv4(205,232,69,0),24 }, + { IPv4(205,232,84,0),24 }, + { IPv4(205,232,85,0),24 }, + { IPv4(205,232,128,0),21 }, + { IPv4(205,232,164,0),24 }, + { IPv4(205,232,165,0),24 }, + { IPv4(205,232,214,0),24 }, + { IPv4(205,232,217,0),24 }, + { IPv4(205,232,248,0),22 }, + { 
IPv4(205,233,22,0),23 }, + { IPv4(205,233,24,0),21 }, + { IPv4(205,233,28,0),24 }, + { IPv4(205,233,29,0),24 }, + { IPv4(205,233,64,0),24 }, + { IPv4(205,233,68,0),24 }, + { IPv4(205,233,106,0),24 }, + { IPv4(205,233,139,0),24 }, + { IPv4(205,233,186,0),24 }, + { IPv4(205,233,187,0),24 }, + { IPv4(205,233,206,0),24 }, + { IPv4(205,233,221,0),24 }, + { IPv4(205,234,0,0),19 }, + { IPv4(205,234,0,0),22 }, + { IPv4(205,234,16,0),21 }, + { IPv4(205,234,24,0),21 }, + { IPv4(205,235,0,0),21 }, + { IPv4(205,235,8,0),21 }, + { IPv4(205,235,16,0),20 }, + { IPv4(205,235,28,0),22 }, + { IPv4(205,235,32,0),20 }, + { IPv4(205,235,48,0),24 }, + { IPv4(205,235,49,0),24 }, + { IPv4(205,235,50,0),24 }, + { IPv4(205,235,51,0),24 }, + { IPv4(205,235,52,0),24 }, + { IPv4(205,235,53,0),24 }, + { IPv4(205,235,54,0),24 }, + { IPv4(205,235,55,0),24 }, + { IPv4(205,235,56,0),24 }, + { IPv4(205,235,57,0),24 }, + { IPv4(205,235,58,0),24 }, + { IPv4(205,235,59,0),24 }, + { IPv4(205,235,60,0),24 }, + { IPv4(205,235,61,0),24 }, + { IPv4(205,235,62,0),24 }, + { IPv4(205,235,63,0),24 }, + { IPv4(205,235,64,0),24 }, + { IPv4(205,235,65,0),24 }, + { IPv4(205,235,66,0),24 }, + { IPv4(205,235,67,0),24 }, + { IPv4(205,235,68,0),24 }, + { IPv4(205,235,69,0),24 }, + { IPv4(205,235,70,0),24 }, + { IPv4(205,235,71,0),24 }, + { IPv4(205,235,72,0),24 }, + { IPv4(205,235,73,0),24 }, + { IPv4(205,235,74,0),24 }, + { IPv4(205,235,75,0),24 }, + { IPv4(205,235,76,0),24 }, + { IPv4(205,235,77,0),24 }, + { IPv4(205,235,78,0),24 }, + { IPv4(205,235,79,0),24 }, + { IPv4(205,235,112,0),24 }, + { IPv4(205,235,113,0),24 }, + { IPv4(205,235,114,0),24 }, + { IPv4(205,235,115,0),24 }, + { IPv4(205,235,116,0),24 }, + { IPv4(205,235,117,0),24 }, + { IPv4(205,235,118,0),24 }, + { IPv4(205,235,119,0),24 }, + { IPv4(205,235,149,0),24 }, + { IPv4(205,235,159,0),24 }, + { IPv4(205,235,160,0),20 }, + { IPv4(205,236,14,0),24 }, + { IPv4(205,236,15,0),24 }, + { IPv4(205,236,93,0),24 }, + { IPv4(205,236,94,0),24 }, + { IPv4(205,236,95,0),24 }, + { IPv4(205,236,134,0),23 }, + { IPv4(205,236,136,0),22 }, + { IPv4(205,236,143,0),24 }, + { IPv4(205,236,144,0),24 }, + { IPv4(205,236,185,0),24 }, + { IPv4(205,237,20,0),24 }, + { IPv4(205,237,230,0),24 }, + { IPv4(205,237,242,0),24 }, + { IPv4(205,238,0,0),18 }, + { IPv4(205,238,3,0),24 }, + { IPv4(205,238,5,0),24 }, + { IPv4(205,238,18,0),24 }, + { IPv4(205,238,28,0),24 }, + { IPv4(205,238,30,0),24 }, + { IPv4(205,238,192,0),18 }, + { IPv4(205,239,178,0),24 }, + { IPv4(205,239,179,0),24 }, + { IPv4(205,239,180,0),24 }, + { IPv4(205,240,80,0),22 }, + { IPv4(205,240,240,0),24 }, + { IPv4(205,241,0,0),21 }, + { IPv4(205,241,144,0),22 }, + { IPv4(205,242,228,0),24 }, + { IPv4(205,242,229,0),24 }, + { IPv4(205,243,72,0),24 }, + { IPv4(205,243,88,0),24 }, + { IPv4(205,243,161,0),24 }, + { IPv4(205,243,162,0),23 }, + { IPv4(205,243,166,0),23 }, + { IPv4(205,243,192,0),19 }, + { IPv4(205,243,220,0),24 }, + { IPv4(205,244,0,0),21 }, + { IPv4(205,244,73,0),24 }, + { IPv4(205,244,74,0),24 }, + { IPv4(205,244,75,0),24 }, + { IPv4(205,244,160,0),21 }, + { IPv4(205,244,188,0),24 }, + { IPv4(205,244,189,0),24 }, + { IPv4(205,245,64,0),24 }, + { IPv4(205,245,65,0),24 }, + { IPv4(205,245,103,0),24 }, + { IPv4(205,245,104,0),22 }, + { IPv4(205,245,135,0),24 }, + { IPv4(205,246,8,0),21 }, + { IPv4(205,246,16,0),23 }, + { IPv4(205,246,48,0),20 }, + { IPv4(205,246,72,0),21 }, + { IPv4(205,246,194,0),24 }, + { IPv4(205,247,126,0),23 }, + { IPv4(205,247,136,0),23 }, + { IPv4(205,247,160,0),20 }, + { IPv4(205,247,176,0),20 }, + { 
IPv4(205,248,29,0),24 }, + { IPv4(205,248,30,0),24 }, + { IPv4(205,248,31,0),24 }, + { IPv4(205,248,226,0),24 }, + { IPv4(205,248,236,0),24 }, + { IPv4(205,250,137,0),24 }, + { IPv4(205,251,12,0),24 }, + { IPv4(205,251,17,0),24 }, + { IPv4(205,251,32,0),22 }, + { IPv4(205,251,36,0),22 }, + { IPv4(205,251,41,0),24 }, + { IPv4(205,251,55,0),24 }, + { IPv4(205,251,56,0),23 }, + { IPv4(205,251,85,0),24 }, + { IPv4(205,251,86,0),23 }, + { IPv4(205,251,88,0),22 }, + { IPv4(205,251,222,0),23 }, + { IPv4(205,251,226,0),23 }, + { IPv4(205,251,233,0),24 }, + { IPv4(205,251,250,0),23 }, + { IPv4(205,252,27,0),24 }, + { IPv4(205,252,116,0),22 }, + { IPv4(205,252,176,0),20 }, + { IPv4(205,253,72,0),22 }, + { IPv4(205,253,107,0),24 }, + { IPv4(205,253,140,0),23 }, + { IPv4(205,253,140,0),24 }, + { IPv4(205,253,192,0),19 }, + { IPv4(205,254,224,0),19 }, + { IPv4(206,8,0,0),14 }, + { IPv4(206,8,8,0),22 }, + { IPv4(206,8,192,0),19 }, + { IPv4(206,8,224,0),23 }, + { IPv4(206,9,32,0),21 }, + { IPv4(206,9,147,0),24 }, + { IPv4(206,9,148,0),22 }, + { IPv4(206,10,223,0),24 }, + { IPv4(206,12,82,0),24 }, + { IPv4(206,12,90,0),23 }, + { IPv4(206,12,148,0),24 }, + { IPv4(206,12,246,0),24 }, + { IPv4(206,14,0,0),16 }, + { IPv4(206,14,57,0),24 }, + { IPv4(206,14,85,0),24 }, + { IPv4(206,14,97,0),24 }, + { IPv4(206,14,176,0),24 }, + { IPv4(206,14,202,0),24 }, + { IPv4(206,14,228,0),24 }, + { IPv4(206,14,238,0),24 }, + { IPv4(206,14,239,0),24 }, + { IPv4(206,14,240,0),24 }, + { IPv4(206,14,241,0),24 }, + { IPv4(206,14,248,0),24 }, + { IPv4(206,14,249,0),24 }, + { IPv4(206,15,11,0),24 }, + { IPv4(206,15,24,0),24 }, + { IPv4(206,15,64,0),20 }, + { IPv4(206,15,64,0),19 }, + { IPv4(206,15,80,0),21 }, + { IPv4(206,15,154,0),24 }, + { IPv4(206,16,0,0),14 }, + { IPv4(206,16,32,0),22 }, + { IPv4(206,16,48,0),22 }, + { IPv4(206,16,136,0),22 }, + { IPv4(206,16,148,0),23 }, + { IPv4(206,16,192,0),20 }, + { IPv4(206,17,20,0),22 }, + { IPv4(206,17,24,0),21 }, + { IPv4(206,17,32,0),22 }, + { IPv4(206,17,58,0),23 }, + { IPv4(206,17,94,0),23 }, + { IPv4(206,17,226,0),24 }, + { IPv4(206,17,250,0),24 }, + { IPv4(206,18,32,0),21 }, + { IPv4(206,18,128,0),17 }, + { IPv4(206,19,61,0),24 }, + { IPv4(206,19,76,0),24 }, + { IPv4(206,19,77,0),24 }, + { IPv4(206,19,96,0),21 }, + { IPv4(206,19,124,0),23 }, + { IPv4(206,19,126,0),24 }, + { IPv4(206,19,129,0),24 }, + { IPv4(206,19,130,0),23 }, + { IPv4(206,19,144,0),24 }, + { IPv4(206,19,146,0),24 }, + { IPv4(206,19,147,0),24 }, + { IPv4(206,19,192,0),24 }, + { IPv4(206,20,0,0),16 }, + { IPv4(206,20,212,0),24 }, + { IPv4(206,24,31,0),24 }, + { IPv4(206,24,35,0),24 }, + { IPv4(206,24,48,0),21 }, + { IPv4(206,24,64,0),24 }, + { IPv4(206,24,68,0),23 }, + { IPv4(206,25,112,0),24 }, + { IPv4(206,25,119,0),24 }, + { IPv4(206,25,172,0),24 }, + { IPv4(206,25,182,0),24 }, + { IPv4(206,26,12,0),22 }, + { IPv4(206,26,98,0),23 }, + { IPv4(206,26,124,0),22 }, + { IPv4(206,26,160,0),21 }, + { IPv4(206,26,217,0),24 }, + { IPv4(206,26,224,0),21 }, + { IPv4(206,27,104,0),24 }, + { IPv4(206,27,118,0),23 }, + { IPv4(206,27,215,0),24 }, + { IPv4(206,27,216,0),24 }, + { IPv4(206,27,238,0),23 }, + { IPv4(206,27,244,0),23 }, + { IPv4(206,28,32,0),19 }, + { IPv4(206,28,102,0),24 }, + { IPv4(206,28,109,0),24 }, + { IPv4(206,28,116,0),23 }, + { IPv4(206,28,119,0),24 }, + { IPv4(206,28,124,0),22 }, + { IPv4(206,28,142,0),23 }, + { IPv4(206,28,153,0),24 }, + { IPv4(206,29,10,0),24 }, + { IPv4(206,29,64,0),21 }, + { IPv4(206,29,77,0),24 }, + { IPv4(206,29,88,0),21 }, + { IPv4(206,29,168,0),21 }, + { IPv4(206,29,184,0),21 }, 
+ { IPv4(206,29,192,0),22 }, + { IPv4(206,29,196,0),23 }, + { IPv4(206,29,224,0),20 }, + { IPv4(206,30,30,0),24 }, + { IPv4(206,30,32,0),21 }, + { IPv4(206,30,48,0),21 }, + { IPv4(206,30,66,0),24 }, + { IPv4(206,30,130,0),24 }, + { IPv4(206,30,144,0),22 }, + { IPv4(206,31,22,0),24 }, + { IPv4(206,31,70,0),23 }, + { IPv4(206,31,77,0),24 }, + { IPv4(206,31,88,0),24 }, + { IPv4(206,31,89,0),24 }, + { IPv4(206,31,90,0),24 }, + { IPv4(206,31,91,0),24 }, + { IPv4(206,31,92,0),24 }, + { IPv4(206,31,93,0),24 }, + { IPv4(206,31,94,0),24 }, + { IPv4(206,31,95,0),24 }, + { IPv4(206,31,212,0),24 }, + { IPv4(206,31,213,0),24 }, + { IPv4(206,31,219,0),24 }, + { IPv4(206,32,34,0),24 }, + { IPv4(206,35,160,0),20 }, + { IPv4(206,37,0,0),16 }, + { IPv4(206,37,28,0),24 }, + { IPv4(206,37,29,0),24 }, + { IPv4(206,37,30,0),24 }, + { IPv4(206,37,31,0),24 }, + { IPv4(206,37,126,0),24 }, + { IPv4(206,37,146,0),24 }, + { IPv4(206,37,153,0),24 }, + { IPv4(206,37,158,0),24 }, + { IPv4(206,37,159,0),24 }, + { IPv4(206,37,164,0),24 }, + { IPv4(206,37,185,0),24 }, + { IPv4(206,37,186,0),24 }, + { IPv4(206,37,191,0),24 }, + { IPv4(206,37,199,0),24 }, + { IPv4(206,37,200,0),24 }, + { IPv4(206,37,205,0),24 }, + { IPv4(206,37,206,0),24 }, + { IPv4(206,37,213,0),24 }, + { IPv4(206,38,0,0),16 }, + { IPv4(206,38,39,0),24 }, + { IPv4(206,38,102,0),24 }, + { IPv4(206,38,115,0),24 }, + { IPv4(206,38,117,0),24 }, + { IPv4(206,38,174,0),24 }, + { IPv4(206,39,0,0),16 }, + { IPv4(206,39,0,0),17 }, + { IPv4(206,39,48,0),21 }, + { IPv4(206,39,65,0),24 }, + { IPv4(206,39,67,0),24 }, + { IPv4(206,39,68,0),24 }, + { IPv4(206,39,108,0),24 }, + { IPv4(206,39,110,0),24 }, + { IPv4(206,39,111,0),24 }, + { IPv4(206,39,116,0),23 }, + { IPv4(206,39,128,0),23 }, + { IPv4(206,39,128,0),18 }, + { IPv4(206,39,148,0),22 }, + { IPv4(206,39,160,0),21 }, + { IPv4(206,39,192,0),21 }, + { IPv4(206,39,192,0),20 }, + { IPv4(206,39,192,0),22 }, + { IPv4(206,39,200,0),23 }, + { IPv4(206,39,202,0),24 }, + { IPv4(206,39,203,0),24 }, + { IPv4(206,39,205,0),24 }, + { IPv4(206,39,206,0),24 }, + { IPv4(206,39,207,0),24 }, + { IPv4(206,39,208,0),24 }, + { IPv4(206,39,208,0),21 }, + { IPv4(206,39,209,0),24 }, + { IPv4(206,39,216,0),22 }, + { IPv4(206,39,232,0),21 }, + { IPv4(206,39,240,0),20 }, + { IPv4(206,39,250,0),24 }, + { IPv4(206,40,40,0),21 }, + { IPv4(206,40,48,0),24 }, + { IPv4(206,40,79,0),24 }, + { IPv4(206,40,93,0),24 }, + { IPv4(206,41,32,0),19 }, + { IPv4(206,41,128,0),23 }, + { IPv4(206,43,192,0),19 }, + { IPv4(206,45,87,0),24 }, + { IPv4(206,45,234,0),24 }, + { IPv4(206,45,247,0),24 }, + { IPv4(206,45,254,0),24 }, + { IPv4(206,47,128,0),22 }, + { IPv4(206,47,218,0),23 }, + { IPv4(206,48,168,0),22 }, + { IPv4(206,49,34,0),24 }, + { IPv4(206,49,58,0),23 }, + { IPv4(206,49,58,0),24 }, + { IPv4(206,49,59,0),24 }, + { IPv4(206,49,79,0),24 }, + { IPv4(206,49,195,0),24 }, + { IPv4(206,50,0,0),16 }, + { IPv4(206,51,23,0),24 }, + { IPv4(206,51,24,0),24 }, + { IPv4(206,51,25,0),24 }, + { IPv4(206,51,26,0),24 }, + { IPv4(206,51,250,0),24 }, + { IPv4(206,51,251,0),24 }, + { IPv4(206,52,0,0),16 }, + { IPv4(206,52,68,0),23 }, + { IPv4(206,53,0,0),22 }, + { IPv4(206,53,132,0),24 }, + { IPv4(206,53,192,0),21 }, + { IPv4(206,53,233,0),24 }, + { IPv4(206,54,0,0),18 }, + { IPv4(206,54,244,0),23 }, + { IPv4(206,54,252,0),24 }, + { IPv4(206,54,253,0),24 }, + { IPv4(206,55,0,0),18 }, + { IPv4(206,55,224,0),19 }, + { IPv4(206,56,128,0),17 }, + { IPv4(206,57,110,0),24 }, + { IPv4(206,58,0,0),16 }, + { IPv4(206,58,34,0),24 }, + { IPv4(206,58,95,0),24 }, + { 
IPv4(206,58,98,0),24 }, + { IPv4(206,58,127,0),24 }, + { IPv4(206,58,133,0),24 }, + { IPv4(206,58,136,0),23 }, + { IPv4(206,58,138,0),24 }, + { IPv4(206,58,140,0),24 }, + { IPv4(206,58,160,0),24 }, + { IPv4(206,58,228,0),24 }, + { IPv4(206,58,236,0),24 }, + { IPv4(206,58,248,0),21 }, + { IPv4(206,61,96,0),24 }, + { IPv4(206,61,98,0),23 }, + { IPv4(206,61,100,0),23 }, + { IPv4(206,61,102,0),24 }, + { IPv4(206,61,103,0),24 }, + { IPv4(206,61,168,0),21 }, + { IPv4(206,62,140,0),22 }, + { IPv4(206,62,192,0),22 }, + { IPv4(206,63,143,0),24 }, + { IPv4(206,63,201,0),24 }, + { IPv4(206,63,202,0),24 }, + { IPv4(206,64,32,0),20 }, + { IPv4(206,64,47,0),24 }, + { IPv4(206,64,112,0),24 }, + { IPv4(206,64,128,0),23 }, + { IPv4(206,64,152,0),21 }, + { IPv4(206,64,192,0),24 }, + { IPv4(206,65,48,0),20 }, + { IPv4(206,65,48,0),21 }, + { IPv4(206,65,56,0),21 }, + { IPv4(206,65,64,0),23 }, + { IPv4(206,65,183,0),24 }, + { IPv4(206,66,160,0),24 }, + { IPv4(206,66,179,0),24 }, + { IPv4(206,67,68,0),22 }, + { IPv4(206,67,78,0),23 }, + { IPv4(206,67,96,0),20 }, + { IPv4(206,67,186,0),23 }, + { IPv4(206,67,234,0),24 }, + { IPv4(206,67,239,0),24 }, + { IPv4(206,67,240,0),20 }, + { IPv4(206,68,0,0),15 }, + { IPv4(206,70,0,0),16 }, + { IPv4(206,70,132,0),24 }, + { IPv4(206,71,64,0),19 }, + { IPv4(206,71,96,0),21 }, + { IPv4(206,71,104,0),21 }, + { IPv4(206,71,112,0),21 }, + { IPv4(206,71,120,0),21 }, + { IPv4(206,71,160,0),19 }, + { IPv4(206,71,224,0),19 }, + { IPv4(206,72,0,0),18 }, + { IPv4(206,73,0,0),16 }, + { IPv4(206,73,5,0),24 }, + { IPv4(206,73,8,0),24 }, + { IPv4(206,73,10,0),24 }, + { IPv4(206,73,14,0),24 }, + { IPv4(206,73,16,0),24 }, + { IPv4(206,73,21,0),24 }, + { IPv4(206,73,34,0),24 }, + { IPv4(206,73,39,0),24 }, + { IPv4(206,73,45,0),24 }, + { IPv4(206,73,47,0),24 }, + { IPv4(206,73,51,0),24 }, + { IPv4(206,73,52,0),24 }, + { IPv4(206,73,53,0),24 }, + { IPv4(206,73,54,0),24 }, + { IPv4(206,73,55,0),24 }, + { IPv4(206,73,59,0),24 }, + { IPv4(206,73,80,0),24 }, + { IPv4(206,73,103,0),24 }, + { IPv4(206,73,105,0),24 }, + { IPv4(206,73,107,0),24 }, + { IPv4(206,73,110,0),24 }, + { IPv4(206,73,112,0),24 }, + { IPv4(206,73,113,0),24 }, + { IPv4(206,73,123,0),24 }, + { IPv4(206,73,136,0),24 }, + { IPv4(206,73,158,0),24 }, + { IPv4(206,73,163,0),24 }, + { IPv4(206,73,164,0),24 }, + { IPv4(206,73,171,0),24 }, + { IPv4(206,73,174,0),24 }, + { IPv4(206,73,177,0),24 }, + { IPv4(206,73,182,0),24 }, + { IPv4(206,73,189,0),24 }, + { IPv4(206,73,190,0),24 }, + { IPv4(206,73,194,0),24 }, + { IPv4(206,73,200,0),24 }, + { IPv4(206,73,202,0),24 }, + { IPv4(206,73,205,0),24 }, + { IPv4(206,73,206,0),24 }, + { IPv4(206,73,209,0),24 }, + { IPv4(206,73,211,0),24 }, + { IPv4(206,73,222,0),24 }, + { IPv4(206,73,227,0),24 }, + { IPv4(206,73,228,0),24 }, + { IPv4(206,73,234,0),24 }, + { IPv4(206,73,235,0),24 }, + { IPv4(206,73,238,0),24 }, + { IPv4(206,73,239,0),24 }, + { IPv4(206,73,240,0),24 }, + { IPv4(206,73,244,0),24 }, + { IPv4(206,73,252,0),24 }, + { IPv4(206,74,0,0),16 }, + { IPv4(206,75,69,0),24 }, + { IPv4(206,75,82,0),24 }, + { IPv4(206,75,114,0),24 }, + { IPv4(206,75,216,0),24 }, + { IPv4(206,75,218,0),24 }, + { IPv4(206,78,0,0),19 }, + { IPv4(206,78,160,0),19 }, + { IPv4(206,79,32,0),20 }, + { IPv4(206,79,140,0),24 }, + { IPv4(206,80,0,0),23 }, + { IPv4(206,80,0,0),19 }, + { IPv4(206,80,7,0),24 }, + { IPv4(206,80,8,0),24 }, + { IPv4(206,80,17,0),24 }, + { IPv4(206,80,18,0),24 }, + { IPv4(206,80,19,0),24 }, + { IPv4(206,80,20,0),23 }, + { IPv4(206,80,22,0),23 }, + { IPv4(206,80,26,0),23 }, + { IPv4(206,80,32,0),19 
}, + { IPv4(206,80,192,0),19 }, + { IPv4(206,80,203,0),24 }, + { IPv4(206,80,205,0),24 }, + { IPv4(206,80,206,0),24 }, + { IPv4(206,80,209,0),24 }, + { IPv4(206,80,210,0),24 }, + { IPv4(206,80,212,0),24 }, + { IPv4(206,80,213,0),24 }, + { IPv4(206,80,214,0),24 }, + { IPv4(206,80,223,0),24 }, + { IPv4(206,81,128,0),19 }, + { IPv4(206,81,136,0),24 }, + { IPv4(206,81,192,0),19 }, + { IPv4(206,81,204,0),24 }, + { IPv4(206,81,205,0),24 }, + { IPv4(206,81,207,0),24 }, + { IPv4(206,81,220,0),24 }, + { IPv4(206,81,220,0),22 }, + { IPv4(206,82,32,0),19 }, + { IPv4(206,82,228,0),24 }, + { IPv4(206,82,248,0),24 }, + { IPv4(206,83,0,0),19 }, + { IPv4(206,83,64,0),19 }, + { IPv4(206,83,87,0),24 }, + { IPv4(206,83,160,0),19 }, + { IPv4(206,85,240,0),24 }, + { IPv4(206,86,0,0),16 }, + { IPv4(206,86,29,0),24 }, + { IPv4(206,86,215,0),26 }, + { IPv4(206,92,172,0),24 }, + { IPv4(206,96,40,0),23 }, + { IPv4(206,96,184,0),24 }, + { IPv4(206,96,185,0),24 }, + { IPv4(206,96,186,0),24 }, + { IPv4(206,96,192,0),23 }, + { IPv4(206,96,200,0),23 }, + { IPv4(206,96,204,0),22 }, + { IPv4(206,96,208,0),21 }, + { IPv4(206,96,216,0),23 }, + { IPv4(206,96,219,0),24 }, + { IPv4(206,96,220,0),22 }, + { IPv4(206,97,122,0),24 }, + { IPv4(206,97,148,0),22 }, + { IPv4(206,97,254,0),24 }, + { IPv4(206,98,48,0),21 }, + { IPv4(206,98,64,0),24 }, + { IPv4(206,98,66,0),23 }, + { IPv4(206,98,68,0),24 }, + { IPv4(206,98,70,0),23 }, + { IPv4(206,98,78,0),23 }, + { IPv4(206,98,80,0),23 }, + { IPv4(206,98,88,0),21 }, + { IPv4(206,98,122,0),23 }, + { IPv4(206,98,124,0),23 }, + { IPv4(206,98,172,0),23 }, + { IPv4(206,98,179,0),24 }, + { IPv4(206,98,232,0),23 }, + { IPv4(206,98,238,0),24 }, + { IPv4(206,98,246,0),24 }, + { IPv4(206,99,32,0),24 }, + { IPv4(206,99,165,0),24 }, + { IPv4(206,99,178,0),24 }, + { IPv4(206,99,186,0),23 }, + { IPv4(206,99,201,0),24 }, + { IPv4(206,100,6,0),24 }, + { IPv4(206,100,7,0),24 }, + { IPv4(206,100,10,0),23 }, + { IPv4(206,100,22,0),23 }, + { IPv4(206,100,56,0),21 }, + { IPv4(206,100,84,0),23 }, + { IPv4(206,100,93,0),24 }, + { IPv4(206,100,148,0),22 }, + { IPv4(206,100,168,0),23 }, + { IPv4(206,100,179,0),24 }, + { IPv4(206,101,68,0),23 }, + { IPv4(206,101,224,0),22 }, + { IPv4(206,102,64,0),19 }, + { IPv4(206,102,127,0),24 }, + { IPv4(206,102,161,0),24 }, + { IPv4(206,102,208,0),23 }, + { IPv4(206,102,212,0),23 }, + { IPv4(206,102,220,0),22 }, + { IPv4(206,103,235,0),24 }, + { IPv4(206,103,236,0),22 }, + { IPv4(206,103,240,0),20 }, + { IPv4(206,104,53,0),24 }, + { IPv4(206,104,144,0),24 }, + { IPv4(206,104,146,0),24 }, + { IPv4(206,104,188,0),22 }, + { IPv4(206,105,172,0),24 }, + { IPv4(206,105,173,0),24 }, + { IPv4(206,105,176,0),20 }, + { IPv4(206,106,192,0),20 }, + { IPv4(206,106,242,0),24 }, + { IPv4(206,107,124,0),22 }, + { IPv4(206,107,180,0),24 }, + { IPv4(206,107,181,0),24 }, + { IPv4(206,107,235,0),24 }, + { IPv4(206,108,4,0),22 }, + { IPv4(206,108,237,0),24 }, + { IPv4(206,108,244,0),24 }, + { IPv4(206,108,246,0),24 }, + { IPv4(206,111,0,0),16 }, + { IPv4(206,111,18,0),24 }, + { IPv4(206,111,49,0),24 }, + { IPv4(206,111,50,0),24 }, + { IPv4(206,111,59,0),24 }, + { IPv4(206,111,102,0),24 }, + { IPv4(206,111,122,0),24 }, + { IPv4(206,112,0,0),19 }, + { IPv4(206,112,32,0),19 }, + { IPv4(206,112,32,0),24 }, + { IPv4(206,112,34,0),24 }, + { IPv4(206,112,47,0),24 }, + { IPv4(206,113,32,0),24 }, + { IPv4(206,113,33,172),30 }, + { IPv4(206,113,35,0),24 }, + { IPv4(206,113,37,128),25 }, + { IPv4(206,113,44,0),24 }, + { IPv4(206,113,46,0),24 }, + { IPv4(206,113,52,0),24 }, + { IPv4(206,113,55,0),24 }, + { 
IPv4(206,113,56,0),24 }, + { IPv4(206,113,57,0),24 }, + { IPv4(206,113,58,0),24 }, + { IPv4(206,113,59,0),24 }, + { IPv4(206,113,60,0),24 }, + { IPv4(206,113,63,0),24 }, + { IPv4(206,113,195,0),24 }, + { IPv4(206,113,203,0),24 }, + { IPv4(206,113,210,0),24 }, + { IPv4(206,113,224,0),19 }, + { IPv4(206,114,173,0),24 }, + { IPv4(206,114,174,0),23 }, + { IPv4(206,114,176,0),24 }, + { IPv4(206,114,183,0),24 }, + { IPv4(206,114,184,0),23 }, + { IPv4(206,114,186,0),24 }, + { IPv4(206,115,64,0),19 }, + { IPv4(206,117,32,0),21 }, + { IPv4(206,117,63,0),24 }, + { IPv4(206,117,100,0),22 }, + { IPv4(206,117,140,0),23 }, + { IPv4(206,117,166,0),23 }, + { IPv4(206,117,182,0),24 }, + { IPv4(206,117,210,0),24 }, + { IPv4(206,117,212,0),22 }, + { IPv4(206,121,0,0),16 }, + { IPv4(206,123,6,0),24 }, + { IPv4(206,123,23,0),24 }, + { IPv4(206,123,26,0),23 }, + { IPv4(206,123,32,0),20 }, + { IPv4(206,124,160,0),19 }, + { IPv4(206,124,192,0),19 }, + { IPv4(206,124,224,0),19 }, + { IPv4(206,126,160,0),21 }, + { IPv4(206,126,168,0),21 }, + { IPv4(206,126,176,0),21 }, + { IPv4(206,127,0,0),19 }, + { IPv4(206,127,224,0),19 }, + { IPv4(206,129,0,0),23 }, + { IPv4(206,129,97,0),24 }, + { IPv4(206,129,139,0),24 }, + { IPv4(206,129,156,0),22 }, + { IPv4(206,130,22,0),24 }, + { IPv4(206,130,45,0),24 }, + { IPv4(206,130,51,0),24 }, + { IPv4(206,130,57,0),24 }, + { IPv4(206,130,64,0),24 }, + { IPv4(206,130,73,0),24 }, + { IPv4(206,130,87,0),24 }, + { IPv4(206,130,91,0),24 }, + { IPv4(206,130,152,0),24 }, + { IPv4(206,130,227,0),24 }, + { IPv4(206,130,240,0),24 }, + { IPv4(206,131,0,0),17 }, + { IPv4(206,131,128,0),19 }, + { IPv4(206,131,160,0),19 }, + { IPv4(206,131,188,0),22 }, + { IPv4(206,131,208,0),20 }, + { IPv4(206,132,65,0),24 }, + { IPv4(206,132,128,0),23 }, + { IPv4(206,132,212,0),24 }, + { IPv4(206,135,26,0),24 }, + { IPv4(206,135,124,0),24 }, + { IPv4(206,135,197,0),24 }, + { IPv4(206,136,168,0),21 }, + { IPv4(206,137,78,0),24 }, + { IPv4(206,137,131,0),24 }, + { IPv4(206,137,189,0),24 }, + { IPv4(206,137,216,0),21 }, + { IPv4(206,137,233,0),24 }, + { IPv4(206,138,100,0),22 }, + { IPv4(206,138,112,0),24 }, + { IPv4(206,138,168,0),21 }, + { IPv4(206,138,252,0),22 }, + { IPv4(206,139,8,0),21 }, + { IPv4(206,139,176,0),20 }, + { IPv4(206,142,12,0),23 }, + { IPv4(206,142,53,0),24 }, + { IPv4(206,142,77,0),24 }, + { IPv4(206,142,100,0),24 }, + { IPv4(206,142,244,0),24 }, + { IPv4(206,142,245,0),24 }, + { IPv4(206,142,246,0),23 }, + { IPv4(206,142,249,0),24 }, + { IPv4(206,142,255,0),24 }, + { IPv4(206,143,34,0),24 }, + { IPv4(206,143,36,0),24 }, + { IPv4(206,143,128,0),17 }, + { IPv4(206,143,160,0),24 }, + { IPv4(206,144,0,0),14 }, + { IPv4(206,144,16,0),21 }, + { IPv4(206,144,56,0),21 }, + { IPv4(206,144,88,0),24 }, + { IPv4(206,144,96,0),19 }, + { IPv4(206,144,128,0),18 }, + { IPv4(206,144,212,0),22 }, + { IPv4(206,144,248,0),21 }, + { IPv4(206,145,13,0),24 }, + { IPv4(206,145,14,0),24 }, + { IPv4(206,145,24,0),22 }, + { IPv4(206,145,117,0),24 }, + { IPv4(206,145,128,0),19 }, + { IPv4(206,146,16,0),20 }, + { IPv4(206,146,60,0),24 }, + { IPv4(206,146,64,0),19 }, + { IPv4(206,146,143,0),24 }, + { IPv4(206,146,233,0),24 }, + { IPv4(206,147,106,0),23 }, + { IPv4(206,147,192,0),18 }, + { IPv4(206,149,144,0),22 }, + { IPv4(206,150,148,0),23 }, + { IPv4(206,150,164,0),24 }, + { IPv4(206,150,165,0),24 }, + { IPv4(206,150,166,0),23 }, + { IPv4(206,150,169,0),24 }, + { IPv4(206,150,174,0),23 }, + { IPv4(206,150,183,0),24 }, + { IPv4(206,150,187,0),24 }, + { IPv4(206,150,188,0),23 }, + { IPv4(206,150,228,0),24 }, + { 
IPv4(206,151,136,0),22 }, + { IPv4(206,151,224,0),19 }, + { IPv4(206,152,180,0),24 }, + { IPv4(206,152,180,0),23 }, + { IPv4(206,152,188,0),22 }, + { IPv4(206,152,208,0),20 }, + { IPv4(206,152,227,0),24 }, + { IPv4(206,153,26,0),24 }, + { IPv4(206,153,58,0),23 }, + { IPv4(206,153,176,0),20 }, + { IPv4(206,154,102,0),24 }, + { IPv4(206,154,105,0),24 }, + { IPv4(206,154,169,0),24 }, + { IPv4(206,155,22,0),24 }, + { IPv4(206,155,24,0),21 }, + { IPv4(206,155,136,0),23 }, + { IPv4(206,155,144,0),20 }, + { IPv4(206,155,169,0),24 }, + { IPv4(206,155,172,0),24 }, + { IPv4(206,155,173,0),24 }, + { IPv4(206,155,228,0),24 }, + { IPv4(206,155,229,0),24 }, + { IPv4(206,155,237,0),24 }, + { IPv4(206,156,4,0),24 }, + { IPv4(206,156,8,0),24 }, + { IPv4(206,156,12,0),24 }, + { IPv4(206,156,24,0),23 }, + { IPv4(206,156,26,0),24 }, + { IPv4(206,156,27,0),24 }, + { IPv4(206,156,32,0),24 }, + { IPv4(206,156,76,0),24 }, + { IPv4(206,157,123,0),24 }, + { IPv4(206,157,180,0),24 }, + { IPv4(206,157,192,0),23 }, + { IPv4(206,157,224,0),21 }, + { IPv4(206,159,40,0),22 }, + { IPv4(206,159,192,0),18 }, + { IPv4(206,160,160,0),21 }, + { IPv4(206,160,192,0),20 }, + { IPv4(206,161,72,0),21 }, + { IPv4(206,161,88,0),21 }, + { IPv4(206,161,160,0),19 }, + { IPv4(206,162,0,0),17 }, + { IPv4(206,162,1,0),24 }, + { IPv4(206,162,14,0),24 }, + { IPv4(206,162,15,0),24 }, + { IPv4(206,162,30,0),24 }, + { IPv4(206,162,31,0),24 }, + { IPv4(206,162,36,0),23 }, + { IPv4(206,162,53,0),24 }, + { IPv4(206,162,66,0),24 }, + { IPv4(206,162,73,0),24 }, + { IPv4(206,162,91,0),24 }, + { IPv4(206,162,112,0),24 }, + { IPv4(206,162,113,0),24 }, + { IPv4(206,162,114,0),24 }, + { IPv4(206,162,124,0),24 }, + { IPv4(206,163,0,0),17 }, + { IPv4(206,163,128,0),18 }, + { IPv4(206,163,188,0),24 }, + { IPv4(206,163,192,0),19 }, + { IPv4(206,165,200,0),22 }, + { IPv4(206,166,0,0),17 }, + { IPv4(206,166,128,0),18 }, + { IPv4(206,166,140,0),22 }, + { IPv4(206,166,156,0),22 }, + { IPv4(206,166,192,0),18 }, + { IPv4(206,166,197,0),24 }, + { IPv4(206,168,0,0),16 }, + { IPv4(206,168,43,0),24 }, + { IPv4(206,168,47,0),24 }, + { IPv4(206,168,140,0),22 }, + { IPv4(206,168,187,0),24 }, + { IPv4(206,168,224,0),24 }, + { IPv4(206,168,228,0),24 }, + { IPv4(206,169,0,0),16 }, + { IPv4(206,169,0,0),21 }, + { IPv4(206,169,1,0),24 }, + { IPv4(206,169,8,0),24 }, + { IPv4(206,169,8,0),22 }, + { IPv4(206,169,9,0),24 }, + { IPv4(206,169,10,0),24 }, + { IPv4(206,169,11,0),24 }, + { IPv4(206,169,12,0),23 }, + { IPv4(206,169,14,0),23 }, + { IPv4(206,169,16,0),21 }, + { IPv4(206,169,20,0),23 }, + { IPv4(206,169,24,0),24 }, + { IPv4(206,169,24,0),21 }, + { IPv4(206,169,26,0),23 }, + { IPv4(206,169,29,0),24 }, + { IPv4(206,169,30,0),24 }, + { IPv4(206,169,31,0),24 }, + { IPv4(206,169,32,0),21 }, + { IPv4(206,169,32,0),23 }, + { IPv4(206,169,40,0),21 }, + { IPv4(206,169,48,0),21 }, + { IPv4(206,169,56,0),21 }, + { IPv4(206,169,60,0),24 }, + { IPv4(206,169,62,0),24 }, + { IPv4(206,169,66,0),24 }, + { IPv4(206,169,70,0),23 }, + { IPv4(206,169,71,0),24 }, + { IPv4(206,169,72,0),21 }, + { IPv4(206,169,76,0),24 }, + { IPv4(206,169,80,0),22 }, + { IPv4(206,169,84,0),24 }, + { IPv4(206,169,86,0),24 }, + { IPv4(206,169,87,0),24 }, + { IPv4(206,169,88,0),22 }, + { IPv4(206,169,88,0),24 }, + { IPv4(206,169,89,0),24 }, + { IPv4(206,169,96,0),22 }, + { IPv4(206,169,96,0),23 }, + { IPv4(206,169,99,0),24 }, + { IPv4(206,169,100,0),22 }, + { IPv4(206,169,104,0),21 }, + { IPv4(206,169,106,0),23 }, + { IPv4(206,169,108,0),23 }, + { IPv4(206,169,112,0),21 }, + { IPv4(206,169,120,0),21 }, + { 
IPv4(206,169,120,0),24 }, + { IPv4(206,169,121,0),24 }, + { IPv4(206,169,122,0),24 }, + { IPv4(206,169,123,0),24 }, + { IPv4(206,169,124,0),24 }, + { IPv4(206,169,127,0),24 }, + { IPv4(206,169,128,0),22 }, + { IPv4(206,169,132,0),23 }, + { IPv4(206,169,132,0),24 }, + { IPv4(206,169,133,0),24 }, + { IPv4(206,169,136,0),21 }, + { IPv4(206,169,144,0),21 }, + { IPv4(206,169,145,0),24 }, + { IPv4(206,169,146,0),23 }, + { IPv4(206,169,150,0),24 }, + { IPv4(206,169,152,0),21 }, + { IPv4(206,169,158,0),23 }, + { IPv4(206,169,160,0),23 }, + { IPv4(206,169,160,0),21 }, + { IPv4(206,169,160,0),24 }, + { IPv4(206,169,161,0),24 }, + { IPv4(206,169,162,0),23 }, + { IPv4(206,169,168,0),21 }, + { IPv4(206,169,173,0),24 }, + { IPv4(206,169,176,0),21 }, + { IPv4(206,169,180,0),24 }, + { IPv4(206,169,182,0),23 }, + { IPv4(206,169,184,0),24 }, + { IPv4(206,169,184,0),21 }, + { IPv4(206,169,192,0),21 }, + { IPv4(206,169,200,0),22 }, + { IPv4(206,169,208,0),22 }, + { IPv4(206,169,212,0),22 }, + { IPv4(206,169,216,0),21 }, + { IPv4(206,169,217,0),24 }, + { IPv4(206,169,218,0),23 }, + { IPv4(206,169,222,0),23 }, + { IPv4(206,169,224,0),21 }, + { IPv4(206,169,232,0),21 }, + { IPv4(206,169,233,0),24 }, + { IPv4(206,169,234,0),23 }, + { IPv4(206,169,237,0),24 }, + { IPv4(206,169,239,0),24 }, + { IPv4(206,169,248,0),21 }, + { IPv4(206,171,214,0),23 }, + { IPv4(206,173,0,0),16 }, + { IPv4(206,173,8,0),22 }, + { IPv4(206,173,12,0),22 }, + { IPv4(206,173,20,0),22 }, + { IPv4(206,173,28,0),22 }, + { IPv4(206,175,82,0),24 }, + { IPv4(206,176,128,0),19 }, + { IPv4(206,176,192,0),21 }, + { IPv4(206,176,200,0),21 }, + { IPv4(206,176,208,0),21 }, + { IPv4(206,176,216,0),21 }, + { IPv4(206,180,64,0),18 }, + { IPv4(206,180,128,0),19 }, + { IPv4(206,180,192,0),20 }, + { IPv4(206,180,199,0),24 }, + { IPv4(206,180,200,0),24 }, + { IPv4(206,180,201,0),24 }, + { IPv4(206,180,204,0),22 }, + { IPv4(206,180,224,0),20 }, + { IPv4(206,182,132,0),24 }, + { IPv4(206,182,146,0),24 }, + { IPv4(206,182,150,0),24 }, + { IPv4(206,182,152,0),24 }, + { IPv4(206,182,157,0),24 }, + { IPv4(206,182,163,0),24 }, + { IPv4(206,182,164,0),24 }, + { IPv4(206,182,184,0),24 }, + { IPv4(206,182,204,0),24 }, + { IPv4(206,182,238,0),24 }, + { IPv4(206,183,64,0),19 }, + { IPv4(206,183,192,0),19 }, + { IPv4(206,183,224,0),19 }, + { IPv4(206,183,242,0),24 }, + { IPv4(206,184,0,0),16 }, + { IPv4(206,184,17,0),24 }, + { IPv4(206,184,20,0),23 }, + { IPv4(206,187,144,0),21 }, + { IPv4(206,189,155,0),24 }, + { IPv4(206,190,32,0),19 }, + { IPv4(206,190,64,0),19 }, + { IPv4(206,190,71,0),24 }, + { IPv4(206,190,72,0),24 }, + { IPv4(206,190,78,0),23 }, + { IPv4(206,190,80,0),23 }, + { IPv4(206,190,82,0),24 }, + { IPv4(206,190,91,0),24 }, + { IPv4(206,190,95,0),24 }, + { IPv4(206,190,128,0),19 }, + { IPv4(206,190,142,0),24 }, + { IPv4(206,190,192,0),19 }, + { IPv4(206,191,0,0),18 }, + { IPv4(206,191,64,0),18 }, + { IPv4(206,191,71,0),24 }, + { IPv4(206,191,77,0),24 }, + { IPv4(206,191,128,0),18 }, + { IPv4(206,191,158,0),24 }, + { IPv4(206,191,163,0),24 }, + { IPv4(206,191,182,0),23 }, + { IPv4(206,191,187,0),24 }, + { IPv4(206,193,128,0),18 }, + { IPv4(206,194,192,0),18 }, + { IPv4(206,195,3,0),24 }, + { IPv4(206,195,4,0),24 }, + { IPv4(206,195,19,0),24 }, + { IPv4(206,195,64,0),19 }, + { IPv4(206,196,64,0),19 }, + { IPv4(206,196,128,0),19 }, + { IPv4(206,196,253,0),24 }, + { IPv4(206,196,254,0),24 }, + { IPv4(206,197,23,0),24 }, + { IPv4(206,197,40,0),23 }, + { IPv4(206,197,43,0),24 }, + { IPv4(206,197,65,0),24 }, + { IPv4(206,197,69,0),24 }, + { IPv4(206,197,74,0),24 }, 
+ { IPv4(206,197,77,0),24 }, + { IPv4(206,197,81,0),24 }, + { IPv4(206,197,104,0),24 }, + { IPv4(206,197,117,0),24 }, + { IPv4(206,197,121,0),24 }, + { IPv4(206,197,144,0),24 }, + { IPv4(206,197,156,0),24 }, + { IPv4(206,197,194,0),24 }, + { IPv4(206,197,206,0),24 }, + { IPv4(206,197,217,0),24 }, + { IPv4(206,197,218,0),24 }, + { IPv4(206,197,219,0),24 }, + { IPv4(206,197,236,0),24 }, + { IPv4(206,197,240,0),24 }, + { IPv4(206,197,251,0),24 }, + { IPv4(206,198,0,0),16 }, + { IPv4(206,199,16,0),24 }, + { IPv4(206,201,0,0),20 }, + { IPv4(206,201,17,0),24 }, + { IPv4(206,201,18,0),24 }, + { IPv4(206,201,19,0),24 }, + { IPv4(206,201,20,0),24 }, + { IPv4(206,201,32,0),20 }, + { IPv4(206,201,172,0),24 }, + { IPv4(206,201,173,0),24 }, + { IPv4(206,201,174,0),24 }, + { IPv4(206,201,192,0),20 }, + { IPv4(206,201,240,0),20 }, + { IPv4(206,202,28,0),24 }, + { IPv4(206,203,171,0),24 }, + { IPv4(206,204,0,0),16 }, + { IPv4(206,206,18,0),24 }, + { IPv4(206,206,24,0),21 }, + { IPv4(206,206,192,0),18 }, + { IPv4(206,206,224,0),22 }, + { IPv4(206,206,228,0),24 }, + { IPv4(206,206,236,0),22 }, + { IPv4(206,207,0,0),18 }, + { IPv4(206,207,16,0),21 }, + { IPv4(206,207,40,0),24 }, + { IPv4(206,207,42,0),24 }, + { IPv4(206,207,45,0),24 }, + { IPv4(206,207,46,0),24 }, + { IPv4(206,207,49,0),24 }, + { IPv4(206,207,50,0),24 }, + { IPv4(206,207,51,0),24 }, + { IPv4(206,207,52,0),22 }, + { IPv4(206,207,64,0),21 }, + { IPv4(206,207,72,0),23 }, + { IPv4(206,207,74,0),24 }, + { IPv4(206,207,96,0),24 }, + { IPv4(206,207,97,0),24 }, + { IPv4(206,207,100,0),24 }, + { IPv4(206,207,113,0),24 }, + { IPv4(206,207,114,0),23 }, + { IPv4(206,207,118,0),24 }, + { IPv4(206,207,119,0),24 }, + { IPv4(206,207,120,0),23 }, + { IPv4(206,207,122,0),24 }, + { IPv4(206,207,128,0),18 }, + { IPv4(206,207,136,0),24 }, + { IPv4(206,207,145,0),24 }, + { IPv4(206,207,153,0),24 }, + { IPv4(206,207,154,0),24 }, + { IPv4(206,207,160,0),20 }, + { IPv4(206,207,186,0),23 }, + { IPv4(206,207,188,0),23 }, + { IPv4(206,207,190,0),23 }, + { IPv4(206,207,192,0),18 }, + { IPv4(206,207,200,0),24 }, + { IPv4(206,207,224,0),19 }, + { IPv4(206,208,2,0),24 }, + { IPv4(206,208,3,0),24 }, + { IPv4(206,208,6,0),24 }, + { IPv4(206,208,88,0),21 }, + { IPv4(206,208,152,0),24 }, + { IPv4(206,208,168,0),24 }, + { IPv4(206,208,169,0),24 }, + { IPv4(206,208,184,0),21 }, + { IPv4(206,208,224,0),21 }, + { IPv4(206,208,236,0),24 }, + { IPv4(206,208,237,0),24 }, + { IPv4(206,208,238,0),24 }, + { IPv4(206,208,239,0),24 }, + { IPv4(206,208,240,0),22 }, + { IPv4(206,208,244,0),23 }, + { IPv4(206,208,246,0),24 }, + { IPv4(206,208,247,0),24 }, + { IPv4(206,209,73,0),24 }, + { IPv4(206,209,96,0),20 }, + { IPv4(206,209,210,0),24 }, + { IPv4(206,209,225,0),24 }, + { IPv4(206,210,26,0),24 }, + { IPv4(206,210,27,0),24 }, + { IPv4(206,210,28,0),24 }, + { IPv4(206,210,29,0),24 }, + { IPv4(206,210,30,0),24 }, + { IPv4(206,210,32,0),19 }, + { IPv4(206,210,64,0),19 }, + { IPv4(206,210,128,0),19 }, + { IPv4(206,211,32,0),19 }, + { IPv4(206,211,96,0),20 }, + { IPv4(206,211,112,0),21 }, + { IPv4(206,211,120,0),24 }, + { IPv4(206,211,121,0),24 }, + { IPv4(206,211,122,0),24 }, + { IPv4(206,213,64,0),18 }, + { IPv4(206,213,128,0),18 }, + { IPv4(206,213,192,0),19 }, + { IPv4(206,213,192,0),18 }, + { IPv4(206,213,209,0),24 }, + { IPv4(206,213,224,0),19 }, + { IPv4(206,213,251,0),24 }, + { IPv4(206,213,252,0),24 }, + { IPv4(206,214,0,0),15 }, + { IPv4(206,214,13,0),24 }, + { IPv4(206,214,25,0),24 }, + { IPv4(206,214,26,0),24 }, + { IPv4(206,214,31,0),24 }, + { IPv4(206,214,33,0),24 }, + { 
IPv4(206,214,58,0),24 }, + { IPv4(206,214,126,0),24 }, + { IPv4(206,214,172,0),24 }, + { IPv4(206,214,209,0),24 }, + { IPv4(206,215,65,0),24 }, + { IPv4(206,215,66,0),24 }, + { IPv4(206,215,140,0),24 }, + { IPv4(206,215,142,0),24 }, + { IPv4(206,215,143,0),24 }, + { IPv4(206,215,145,0),24 }, + { IPv4(206,215,167,0),24 }, + { IPv4(206,215,217,0),24 }, + { IPv4(206,215,223,0),24 }, + { IPv4(206,215,228,0),24 }, + { IPv4(206,215,229,0),24 }, + { IPv4(206,215,230,0),24 }, + { IPv4(206,215,231,0),24 }, + { IPv4(206,215,236,0),24 }, + { IPv4(206,215,237,0),24 }, + { IPv4(206,216,0,0),15 }, + { IPv4(206,216,1,0),24 }, + { IPv4(206,216,6,0),24 }, + { IPv4(206,216,53,0),24 }, + { IPv4(206,216,77,0),24 }, + { IPv4(206,216,103,0),24 }, + { IPv4(206,217,36,0),24 }, + { IPv4(206,217,102,0),24 }, + { IPv4(206,217,121,0),24 }, + { IPv4(206,217,207,0),24 }, + { IPv4(206,217,239,0),24 }, + { IPv4(206,219,0,0),20 }, + { IPv4(206,219,16,0),21 }, + { IPv4(206,219,35,0),24 }, + { IPv4(206,219,36,0),22 }, + { IPv4(206,219,44,0),23 }, + { IPv4(206,219,49,0),24 }, + { IPv4(206,219,50,0),23 }, + { IPv4(206,219,52,0),23 }, + { IPv4(206,219,64,0),19 }, + { IPv4(206,219,96,0),19 }, + { IPv4(206,219,128,0),18 }, + { IPv4(206,219,192,0),18 }, + { IPv4(206,220,28,0),24 }, + { IPv4(206,220,30,0),24 }, + { IPv4(206,220,64,0),24 }, + { IPv4(206,220,65,0),24 }, + { IPv4(206,220,136,0),22 }, + { IPv4(206,220,168,0),22 }, + { IPv4(206,220,224,0),22 }, + { IPv4(206,221,35,0),24 }, + { IPv4(206,221,164,0),24 }, + { IPv4(206,221,165,0),24 }, + { IPv4(206,221,166,0),24 }, + { IPv4(206,221,167,0),24 }, + { IPv4(206,222,32,0),19 }, + { IPv4(206,222,64,0),19 }, + { IPv4(206,222,96,0),19 }, + { IPv4(206,222,224,0),19 }, + { IPv4(206,223,36,0),24 }, + { IPv4(206,223,39,0),24 }, + { IPv4(206,223,70,0),24 }, + { IPv4(206,223,80,0),24 }, + { IPv4(206,223,93,0),24 }, + { IPv4(206,223,102,0),24 }, + { IPv4(206,223,110,0),24 }, + { IPv4(206,223,132,0),24 }, + { IPv4(206,223,133,0),24 }, + { IPv4(206,224,32,0),19 }, + { IPv4(206,224,64,0),19 }, + { IPv4(206,225,32,0),19 }, + { IPv4(206,228,16,0),20 }, + { IPv4(206,228,64,0),19 }, + { IPv4(206,228,78,0),24 }, + { IPv4(206,228,139,0),24 }, + { IPv4(206,228,141,0),24 }, + { IPv4(206,228,142,0),24 }, + { IPv4(206,228,143,0),24 }, + { IPv4(206,228,186,0),24 }, + { IPv4(206,228,187,0),24 }, + { IPv4(206,229,125,0),24 }, + { IPv4(206,229,220,0),24 }, + { IPv4(206,229,221,0),24 }, + { IPv4(206,230,192,0),19 }, + { IPv4(206,230,221,0),24 }, + { IPv4(206,231,96,0),24 }, + { IPv4(206,231,192,0),19 }, + { IPv4(206,236,78,0),24 }, + { IPv4(206,236,79,0),24 }, + { IPv4(206,239,0,0),16 }, + { IPv4(206,239,10,0),24 }, + { IPv4(206,240,24,0),22 }, + { IPv4(206,241,181,0),24 }, + { IPv4(206,241,182,0),24 }, + { IPv4(206,243,128,0),23 }, + { IPv4(206,243,130,0),24 }, + { IPv4(206,245,128,0),18 }, + { IPv4(206,245,192,0),18 }, + { IPv4(206,245,195,0),24 }, + { IPv4(206,245,199,0),24 }, + { IPv4(206,245,233,0),24 }, + { IPv4(206,245,234,0),24 }, + { IPv4(206,245,235,0),24 }, + { IPv4(206,245,240,0),24 }, + { IPv4(206,245,243,0),24 }, + { IPv4(206,246,32,0),20 }, + { IPv4(206,246,46,0),23 }, + { IPv4(206,246,64,0),18 }, + { IPv4(206,249,14,0),24 }, + { IPv4(206,250,201,0),24 }, + { IPv4(206,251,25,0),24 }, + { IPv4(206,251,128,0),19 }, + { IPv4(206,251,192,0),24 }, + { IPv4(206,251,195,0),24 }, + { IPv4(206,251,196,0),22 }, + { IPv4(206,251,200,0),21 }, + { IPv4(206,251,208,0),24 }, + { IPv4(206,251,210,0),23 }, + { IPv4(206,251,212,0),24 }, + { IPv4(206,251,213,0),24 }, + { IPv4(206,251,214,0),24 }, + { 
IPv4(206,251,224,0),19 }, + { IPv4(206,252,64,0),18 }, + { IPv4(206,252,128,0),19 }, + { IPv4(206,252,149,128),26 }, + { IPv4(206,252,224,0),19 }, + { IPv4(206,253,64,0),19 }, + { IPv4(206,253,94,0),24 }, + { IPv4(206,253,192,0),19 }, + { IPv4(206,253,224,0),19 }, + { IPv4(207,0,32,0),21 }, + { IPv4(207,0,67,0),24 }, + { IPv4(207,0,68,0),23 }, + { IPv4(207,0,72,0),23 }, + { IPv4(207,0,74,0),24 }, + { IPv4(207,0,112,0),20 }, + { IPv4(207,0,224,0),20 }, + { IPv4(207,1,56,0),23 }, + { IPv4(207,1,96,0),21 }, + { IPv4(207,1,104,0),22 }, + { IPv4(207,1,108,0),23 }, + { IPv4(207,1,136,0),21 }, + { IPv4(207,1,144,0),21 }, + { IPv4(207,1,152,0),23 }, + { IPv4(207,1,201,0),24 }, + { IPv4(207,2,144,0),20 }, + { IPv4(207,2,218,0),24 }, + { IPv4(207,3,144,0),21 }, + { IPv4(207,3,192,0),19 }, + { IPv4(207,5,16,0),22 }, + { IPv4(207,5,40,0),21 }, + { IPv4(207,5,64,0),22 }, + { IPv4(207,5,68,0),22 }, + { IPv4(207,5,80,0),21 }, + { IPv4(207,5,88,0),21 }, + { IPv4(207,5,96,0),24 }, + { IPv4(207,5,97,0),24 }, + { IPv4(207,5,98,0),24 }, + { IPv4(207,5,99,0),24 }, + { IPv4(207,5,100,0),24 }, + { IPv4(207,5,101,0),24 }, + { IPv4(207,5,102,0),24 }, + { IPv4(207,5,103,0),24 }, + { IPv4(207,5,104,0),24 }, + { IPv4(207,5,105,0),24 }, + { IPv4(207,5,106,0),24 }, + { IPv4(207,5,107,0),24 }, + { IPv4(207,5,108,0),24 }, + { IPv4(207,5,109,0),24 }, + { IPv4(207,5,110,0),24 }, + { IPv4(207,5,111,0),24 }, + { IPv4(207,7,16,0),23 }, + { IPv4(207,7,64,0),20 }, + { IPv4(207,7,80,0),20 }, + { IPv4(207,8,0,0),17 }, + { IPv4(207,8,128,0),17 }, + { IPv4(207,8,133,0),24 }, + { IPv4(207,8,149,0),24 }, + { IPv4(207,8,164,0),22 }, + { IPv4(207,8,174,0),23 }, + { IPv4(207,8,196,0),24 }, + { IPv4(207,8,218,0),24 }, + { IPv4(207,8,234,0),24 }, + { IPv4(207,10,0,0),16 }, + { IPv4(207,10,4,0),24 }, + { IPv4(207,10,28,0),22 }, + { IPv4(207,10,44,0),22 }, + { IPv4(207,10,55,0),24 }, + { IPv4(207,10,136,0),24 }, + { IPv4(207,10,137,0),24 }, + { IPv4(207,10,138,0),24 }, + { IPv4(207,10,139,0),24 }, + { IPv4(207,10,140,0),24 }, + { IPv4(207,10,141,0),24 }, + { IPv4(207,10,142,0),24 }, + { IPv4(207,10,143,0),24 }, + { IPv4(207,10,161,0),24 }, + { IPv4(207,10,196,0),24 }, + { IPv4(207,10,197,0),24 }, + { IPv4(207,10,198,0),24 }, + { IPv4(207,10,199,0),24 }, + { IPv4(207,10,206,0),24 }, + { IPv4(207,11,0,0),17 }, + { IPv4(207,11,207,0),24 }, + { IPv4(207,11,208,0),24 }, + { IPv4(207,11,210,0),24 }, + { IPv4(207,12,0,0),20 }, + { IPv4(207,12,16,0),20 }, + { IPv4(207,12,19,0),24 }, + { IPv4(207,12,20,0),23 }, + { IPv4(207,12,22,0),24 }, + { IPv4(207,12,181,0),24 }, + { IPv4(207,13,14,0),24 }, + { IPv4(207,13,175,0),24 }, + { IPv4(207,13,230,0),24 }, + { IPv4(207,14,96,0),21 }, + { IPv4(207,14,97,0),24 }, + { IPv4(207,14,98,0),23 }, + { IPv4(207,14,100,0),24 }, + { IPv4(207,14,104,0),21 }, + { IPv4(207,14,109,0),24 }, + { IPv4(207,14,144,0),20 }, + { IPv4(207,14,160,0),24 }, + { IPv4(207,14,161,0),24 }, + { IPv4(207,14,192,0),20 }, + { IPv4(207,14,210,0),24 }, + { IPv4(207,14,211,0),24 }, + { IPv4(207,15,208,0),21 }, + { IPv4(207,16,46,0),24 }, + { IPv4(207,16,47,0),24 }, + { IPv4(207,16,68,0),24 }, + { IPv4(207,16,70,0),24 }, + { IPv4(207,16,71,0),24 }, + { IPv4(207,17,33,0),24 }, + { IPv4(207,17,34,0),24 }, + { IPv4(207,17,35,0),24 }, + { IPv4(207,17,37,0),24 }, + { IPv4(207,17,46,0),24 }, + { IPv4(207,17,47,0),24 }, + { IPv4(207,17,52,0),24 }, + { IPv4(207,17,53,0),24 }, + { IPv4(207,17,67,0),24 }, + { IPv4(207,17,191,0),24 }, + { IPv4(207,17,212,0),22 }, + { IPv4(207,18,112,0),22 }, + { IPv4(207,18,144,0),20 }, + { IPv4(207,18,184,0),24 }, + { 
IPv4(207,18,193,0),24 }, + { IPv4(207,19,96,0),21 }, + { IPv4(207,19,194,0),23 }, + { IPv4(207,20,0,0),16 }, + { IPv4(207,20,85,0),24 }, + { IPv4(207,20,127,0),24 }, + { IPv4(207,20,139,0),24 }, + { IPv4(207,21,0,0),17 }, + { IPv4(207,21,33,0),24 }, + { IPv4(207,21,34,0),24 }, + { IPv4(207,21,128,0),18 }, + { IPv4(207,22,64,0),18 }, + { IPv4(207,22,135,0),24 }, + { IPv4(207,25,68,0),24 }, + { IPv4(207,25,71,0),24 }, + { IPv4(207,25,79,0),24 }, + { IPv4(207,25,80,0),24 }, + { IPv4(207,25,182,0),24 }, + { IPv4(207,25,225,0),24 }, + { IPv4(207,25,248,0),21 }, + { IPv4(207,26,208,0),21 }, + { IPv4(207,26,230,0),23 }, + { IPv4(207,28,0,0),16 }, + { IPv4(207,28,96,0),20 }, + { IPv4(207,28,112,0),22 }, + { IPv4(207,28,116,0),23 }, + { IPv4(207,29,192,0),20 }, + { IPv4(207,31,0,0),18 }, + { IPv4(207,31,64,0),18 }, + { IPv4(207,31,68,0),24 }, + { IPv4(207,31,72,0),24 }, + { IPv4(207,31,73,0),24 }, + { IPv4(207,31,75,0),24 }, + { IPv4(207,31,81,0),24 }, + { IPv4(207,31,82,0),24 }, + { IPv4(207,31,84,0),24 }, + { IPv4(207,31,92,0),23 }, + { IPv4(207,31,96,0),21 }, + { IPv4(207,31,118,0),24 }, + { IPv4(207,31,122,0),24 }, + { IPv4(207,31,123,0),24 }, + { IPv4(207,31,128,0),18 }, + { IPv4(207,31,192,0),18 }, + { IPv4(207,32,0,0),18 }, + { IPv4(207,32,35,0),24 }, + { IPv4(207,32,64,0),18 }, + { IPv4(207,33,0,0),16 }, + { IPv4(207,33,48,0),24 }, + { IPv4(207,33,49,0),24 }, + { IPv4(207,33,50,0),24 }, + { IPv4(207,33,51,0),24 }, + { IPv4(207,33,55,0),24 }, + { IPv4(207,33,112,0),24 }, + { IPv4(207,33,113,0),24 }, + { IPv4(207,33,114,0),24 }, + { IPv4(207,34,50,0),24 }, + { IPv4(207,36,0,0),16 }, + { IPv4(207,36,32,0),19 }, + { IPv4(207,36,64,0),19 }, + { IPv4(207,36,96,0),19 }, + { IPv4(207,36,202,0),23 }, + { IPv4(207,36,210,0),23 }, + { IPv4(207,36,214,0),23 }, + { IPv4(207,36,240,0),23 }, + { IPv4(207,36,246,0),23 }, + { IPv4(207,36,248,0),23 }, + { IPv4(207,36,250,0),23 }, + { IPv4(207,37,34,0),24 }, + { IPv4(207,38,102,0),24 }, + { IPv4(207,38,128,0),17 }, + { IPv4(207,40,5,0),24 }, + { IPv4(207,40,6,0),24 }, + { IPv4(207,40,14,0),23 }, + { IPv4(207,40,105,0),24 }, + { IPv4(207,40,196,0),23 }, + { IPv4(207,41,144,0),20 }, + { IPv4(207,42,0,0),20 }, + { IPv4(207,42,48,0),24 }, + { IPv4(207,42,153,0),24 }, + { IPv4(207,42,200,0),23 }, + { IPv4(207,42,238,0),24 }, + { IPv4(207,43,71,0),24 }, + { IPv4(207,43,120,0),22 }, + { IPv4(207,43,180,0),22 }, + { IPv4(207,43,208,0),21 }, + { IPv4(207,44,0,0),17 }, + { IPv4(207,44,20,0),24 }, + { IPv4(207,44,24,0),23 }, + { IPv4(207,44,30,0),23 }, + { IPv4(207,44,95,0),24 }, + { IPv4(207,45,40,0),24 }, + { IPv4(207,45,41,0),24 }, + { IPv4(207,45,66,0),24 }, + { IPv4(207,45,67,0),24 }, + { IPv4(207,45,68,0),24 }, + { IPv4(207,45,70,0),24 }, + { IPv4(207,45,71,0),24 }, + { IPv4(207,45,96,0),21 }, + { IPv4(207,45,130,0),24 }, + { IPv4(207,45,240,0),23 }, + { IPv4(207,46,0,0),19 }, + { IPv4(207,46,32,0),19 }, + { IPv4(207,46,64,0),19 }, + { IPv4(207,46,128,0),18 }, + { IPv4(207,46,192,0),18 }, + { IPv4(207,48,19,0),24 }, + { IPv4(207,48,34,0),23 }, + { IPv4(207,48,42,0),23 }, + { IPv4(207,48,186,0),23 }, + { IPv4(207,48,188,0),23 }, + { IPv4(207,48,190,0),24 }, + { IPv4(207,49,20,0),22 }, + { IPv4(207,49,40,0),23 }, + { IPv4(207,49,128,0),23 }, + { IPv4(207,49,130,0),23 }, + { IPv4(207,49,132,0),23 }, + { IPv4(207,49,156,0),24 }, + { IPv4(207,49,157,0),24 }, + { IPv4(207,49,158,0),24 }, + { IPv4(207,49,159,0),24 }, + { IPv4(207,50,32,0),22 }, + { IPv4(207,50,36,0),22 }, + { IPv4(207,50,40,0),22 }, + { IPv4(207,50,44,0),22 }, + { IPv4(207,50,48,0),21 }, + { 
IPv4(207,50,56,0),24 }, + { IPv4(207,50,57,0),24 }, + { IPv4(207,50,58,0),23 }, + { IPv4(207,50,60,0),22 }, + { IPv4(207,50,76,0),24 }, + { IPv4(207,50,80,0),22 }, + { IPv4(207,50,84,0),22 }, + { IPv4(207,50,88,0),22 }, + { IPv4(207,50,92,0),22 }, + { IPv4(207,50,96,0),20 }, + { IPv4(207,50,220,0),22 }, + { IPv4(207,50,222,0),24 }, + { IPv4(207,50,248,0),22 }, + { IPv4(207,51,62,0),23 }, + { IPv4(207,51,64,0),24 }, + { IPv4(207,51,69,0),24 }, + { IPv4(207,51,74,0),24 }, + { IPv4(207,51,78,0),24 }, + { IPv4(207,51,89,0),24 }, + { IPv4(207,51,90,0),23 }, + { IPv4(207,51,92,0),24 }, + { IPv4(207,51,93,0),24 }, + { IPv4(207,51,94,0),24 }, + { IPv4(207,51,148,0),23 }, + { IPv4(207,51,157,0),24 }, + { IPv4(207,51,216,0),24 }, + { IPv4(207,52,42,0),24 }, + { IPv4(207,53,39,0),24 }, + { IPv4(207,53,80,0),24 }, + { IPv4(207,53,87,0),24 }, + { IPv4(207,53,128,0),18 }, + { IPv4(207,53,172,0),24 }, + { IPv4(207,53,183,0),24 }, + { IPv4(207,53,184,0),23 }, + { IPv4(207,53,224,0),20 }, + { IPv4(207,54,32,0),19 }, + { IPv4(207,54,96,0),24 }, + { IPv4(207,54,97,0),24 }, + { IPv4(207,54,98,0),24 }, + { IPv4(207,54,99,0),24 }, + { IPv4(207,54,101,0),24 }, + { IPv4(207,54,102,0),24 }, + { IPv4(207,55,128,0),18 }, + { IPv4(207,55,192,0),19 }, + { IPv4(207,56,0,0),15 }, + { IPv4(207,58,0,0),17 }, + { IPv4(207,61,42,0),23 }, + { IPv4(207,61,146,0),24 }, + { IPv4(207,62,0,0),16 }, + { IPv4(207,63,0,0),16 }, + { IPv4(207,65,0,0),16 }, + { IPv4(207,65,0,0),18 }, + { IPv4(207,65,64,0),18 }, + { IPv4(207,65,100,0),24 }, + { IPv4(207,65,128,0),18 }, + { IPv4(207,65,192,0),18 }, + { IPv4(207,66,11,0),24 }, + { IPv4(207,66,161,0),24 }, + { IPv4(207,66,171,0),24 }, + { IPv4(207,66,186,0),24 }, + { IPv4(207,66,193,0),24 }, + { IPv4(207,66,228,0),24 }, + { IPv4(207,67,0,0),17 }, + { IPv4(207,67,104,0),24 }, + { IPv4(207,67,107,0),24 }, + { IPv4(207,67,128,0),17 }, + { IPv4(207,67,130,0),24 }, + { IPv4(207,67,137,0),24 }, + { IPv4(207,67,142,0),23 }, + { IPv4(207,67,200,0),21 }, + { IPv4(207,67,215,0),24 }, + { IPv4(207,67,216,0),21 }, + { IPv4(207,67,229,0),24 }, + { IPv4(207,68,128,0),20 }, + { IPv4(207,68,160,0),19 }, + { IPv4(207,69,0,0),16 }, + { IPv4(207,70,27,0),24 }, + { IPv4(207,70,35,0),24 }, + { IPv4(207,70,40,0),24 }, + { IPv4(207,70,42,0),24 }, + { IPv4(207,70,44,0),23 }, + { IPv4(207,70,53,0),24 }, + { IPv4(207,70,64,0),18 }, + { IPv4(207,70,73,0),24 }, + { IPv4(207,70,128,0),19 }, + { IPv4(207,70,160,0),19 }, + { IPv4(207,70,170,0),24 }, + { IPv4(207,71,8,0),24 }, + { IPv4(207,71,44,0),23 }, + { IPv4(207,71,64,0),18 }, + { IPv4(207,71,192,0),18 }, + { IPv4(207,74,176,0),21 }, + { IPv4(207,74,184,0),22 }, + { IPv4(207,75,116,0),22 }, + { IPv4(207,75,120,0),22 }, + { IPv4(207,75,124,0),23 }, + { IPv4(207,75,126,0),24 }, + { IPv4(207,76,72,0),21 }, + { IPv4(207,76,168,0),24 }, + { IPv4(207,76,169,0),24 }, + { IPv4(207,76,170,0),24 }, + { IPv4(207,76,171,0),24 }, + { IPv4(207,76,172,0),24 }, + { IPv4(207,76,173,0),24 }, + { IPv4(207,76,174,0),24 }, + { IPv4(207,76,175,0),24 }, + { IPv4(207,77,72,0),24 }, + { IPv4(207,77,83,0),24 }, + { IPv4(207,77,220,0),24 }, + { IPv4(207,78,8,0),21 }, + { IPv4(207,78,95,0),24 }, + { IPv4(207,78,104,0),24 }, + { IPv4(207,82,250,0),23 }, + { IPv4(207,82,252,0),23 }, + { IPv4(207,83,64,0),19 }, + { IPv4(207,84,253,0),24 }, + { IPv4(207,86,78,0),23 }, + { IPv4(207,86,86,0),23 }, + { IPv4(207,86,172,0),22 }, + { IPv4(207,86,244,0),22 }, + { IPv4(207,87,80,0),22 }, + { IPv4(207,87,162,0),24 }, + { IPv4(207,87,182,0),23 }, + { IPv4(207,87,184,0),23 }, + { IPv4(207,87,205,0),24 }, + { 
IPv4(207,87,214,0),24 }, + { IPv4(207,88,0,0),16 }, + { IPv4(207,88,25,0),24 }, + { IPv4(207,88,57,0),24 }, + { IPv4(207,88,58,0),24 }, + { IPv4(207,88,192,0),24 }, + { IPv4(207,88,193,0),24 }, + { IPv4(207,89,163,0),24 }, + { IPv4(207,89,165,0),24 }, + { IPv4(207,89,166,0),23 }, + { IPv4(207,90,0,0),18 }, + { IPv4(207,90,64,0),18 }, + { IPv4(207,90,192,0),18 }, + { IPv4(207,91,64,0),18 }, + { IPv4(207,91,106,0),23 }, + { IPv4(207,91,108,0),23 }, + { IPv4(207,92,0,0),14 }, + { IPv4(207,92,175,0),24 }, + { IPv4(207,93,45,0),24 }, + { IPv4(207,93,60,0),24 }, + { IPv4(207,93,93,0),24 }, + { IPv4(207,93,132,0),24 }, + { IPv4(207,94,33,0),24 }, + { IPv4(207,94,100,0),24 }, + { IPv4(207,94,225,0),24 }, + { IPv4(207,94,229,0),24 }, + { IPv4(207,96,0,0),17 }, + { IPv4(207,96,128,0),17 }, + { IPv4(207,97,0,0),17 }, + { IPv4(207,97,61,0),24 }, + { IPv4(207,99,0,0),17 }, + { IPv4(207,99,22,0),24 }, + { IPv4(207,99,128,0),17 }, + { IPv4(207,100,24,0),23 }, + { IPv4(207,100,32,0),20 }, + { IPv4(207,101,0,0),20 }, + { IPv4(207,105,237,0),24 }, + { IPv4(207,105,239,0),24 }, + { IPv4(207,106,0,0),16 }, + { IPv4(207,106,0,0),17 }, + { IPv4(207,106,31,0),24 }, + { IPv4(207,106,41,0),24 }, + { IPv4(207,106,42,0),24 }, + { IPv4(207,106,45,0),24 }, + { IPv4(207,106,49,0),24 }, + { IPv4(207,106,54,0),23 }, + { IPv4(207,106,84,0),24 }, + { IPv4(207,106,119,0),24 }, + { IPv4(207,106,121,0),24 }, + { IPv4(207,106,128,0),17 }, + { IPv4(207,106,167,0),24 }, + { IPv4(207,107,37,0),24 }, + { IPv4(207,107,134,0),24 }, + { IPv4(207,108,146,0),24 }, + { IPv4(207,108,195,0),24 }, + { IPv4(207,109,20,0),24 }, + { IPv4(207,110,0,0),18 }, + { IPv4(207,111,19,0),24 }, + { IPv4(207,111,20,0),24 }, + { IPv4(207,111,22,0),23 }, + { IPv4(207,111,24,0),23 }, + { IPv4(207,111,64,0),18 }, + { IPv4(207,111,160,0),20 }, + { IPv4(207,111,192,0),18 }, + { IPv4(207,112,140,0),22 }, + { IPv4(207,112,156,0),22 }, + { IPv4(207,112,192,0),21 }, + { IPv4(207,112,204,0),22 }, + { IPv4(207,112,236,0),23 }, + { IPv4(207,113,0,0),17 }, + { IPv4(207,113,128,0),17 }, + { IPv4(207,113,129,0),24 }, + { IPv4(207,113,130,0),24 }, + { IPv4(207,113,134,0),24 }, + { IPv4(207,113,140,0),24 }, + { IPv4(207,113,141,0),24 }, + { IPv4(207,113,155,0),24 }, + { IPv4(207,113,156,0),24 }, + { IPv4(207,113,167,0),24 }, + { IPv4(207,113,201,0),24 }, + { IPv4(207,113,222,0),24 }, + { IPv4(207,114,0,0),17 }, + { IPv4(207,114,128,0),17 }, + { IPv4(207,114,131,0),24 }, + { IPv4(207,114,134,0),24 }, + { IPv4(207,114,135,0),24 }, + { IPv4(207,114,140,0),24 }, + { IPv4(207,114,141,0),24 }, + { IPv4(207,114,142,0),24 }, + { IPv4(207,114,143,0),24 }, + { IPv4(207,114,146,0),24 }, + { IPv4(207,114,147,0),24 }, + { IPv4(207,114,148,0),24 }, + { IPv4(207,114,149,0),24 }, + { IPv4(207,114,150,0),24 }, + { IPv4(207,114,151,0),24 }, + { IPv4(207,114,153,0),24 }, + { IPv4(207,114,160,0),24 }, + { IPv4(207,114,162,0),24 }, + { IPv4(207,114,168,0),24 }, + { IPv4(207,114,170,0),24 }, + { IPv4(207,114,171,0),24 }, + { IPv4(207,114,177,0),24 }, + { IPv4(207,114,186,0),23 }, + { IPv4(207,114,193,0),24 }, + { IPv4(207,114,199,0),24 }, + { IPv4(207,114,201,0),24 }, + { IPv4(207,114,202,0),24 }, + { IPv4(207,114,207,0),24 }, + { IPv4(207,114,208,0),23 }, + { IPv4(207,114,212,0),24 }, + { IPv4(207,114,213,0),24 }, + { IPv4(207,114,214,0),24 }, + { IPv4(207,114,215,0),24 }, + { IPv4(207,114,221,0),24 }, + { IPv4(207,114,232,0),24 }, + { IPv4(207,114,236,0),24 }, + { IPv4(207,114,241,0),24 }, + { IPv4(207,114,248,0),21 }, + { IPv4(207,114,253,0),24 }, + { IPv4(207,115,0,0),18 }, + { 
IPv4(207,115,63,0),24 }, + { IPv4(207,115,64,0),19 }, + { IPv4(207,115,235,0),24 }, + { IPv4(207,117,0,0),16 }, + { IPv4(207,117,8,0),24 }, + { IPv4(207,117,33,0),24 }, + { IPv4(207,117,42,0),24 }, + { IPv4(207,117,66,0),24 }, + { IPv4(207,117,80,0),24 }, + { IPv4(207,117,106,0),24 }, + { IPv4(207,117,162,0),24 }, + { IPv4(207,117,210,0),24 }, + { IPv4(207,117,246,0),24 }, + { IPv4(207,120,28,0),22 }, + { IPv4(207,120,109,0),24 }, + { IPv4(207,120,160,0),23 }, + { IPv4(207,120,198,0),23 }, + { IPv4(207,120,200,0),24 }, + { IPv4(207,120,213,0),24 }, + { IPv4(207,120,214,0),23 }, + { IPv4(207,122,32,0),24 }, + { IPv4(207,123,13,0),24 }, + { IPv4(207,123,219,0),24 }, + { IPv4(207,124,75,0),24 }, + { IPv4(207,124,89,0),24 }, + { IPv4(207,124,90,0),24 }, + { IPv4(207,124,114,0),24 }, + { IPv4(207,124,115,0),24 }, + { IPv4(207,124,144,0),22 }, + { IPv4(207,124,171,0),24 }, + { IPv4(207,124,172,0),22 }, + { IPv4(207,124,176,0),23 }, + { IPv4(207,124,231,0),24 }, + { IPv4(207,126,96,0),19 }, + { IPv4(207,126,97,0),24 }, + { IPv4(207,126,128,0),24 }, + { IPv4(207,126,129,0),24 }, + { IPv4(207,126,130,0),24 }, + { IPv4(207,126,131,0),24 }, + { IPv4(207,126,132,0),24 }, + { IPv4(207,126,133,0),24 }, + { IPv4(207,126,134,0),24 }, + { IPv4(207,126,135,0),24 }, + { IPv4(207,127,0,0),16 }, + { IPv4(207,127,69,0),24 }, + { IPv4(207,127,96,0),24 }, + { IPv4(207,127,97,0),24 }, + { IPv4(207,127,98,0),24 }, + { IPv4(207,127,99,0),24 }, + { IPv4(207,127,104,0),24 }, + { IPv4(207,127,105,0),24 }, + { IPv4(207,127,106,0),24 }, + { IPv4(207,127,107,0),24 }, + { IPv4(207,127,108,0),24 }, + { IPv4(207,127,109,0),24 }, + { IPv4(207,127,110,0),24 }, + { IPv4(207,127,111,0),24 }, + { IPv4(207,127,113,0),24 }, + { IPv4(207,127,116,0),24 }, + { IPv4(207,127,117,0),24 }, + { IPv4(207,127,120,0),21 }, + { IPv4(207,127,128,0),24 }, + { IPv4(207,127,135,0),24 }, + { IPv4(207,127,138,0),24 }, + { IPv4(207,127,151,0),24 }, + { IPv4(207,127,152,0),24 }, + { IPv4(207,127,210,0),24 }, + { IPv4(207,127,211,0),24 }, + { IPv4(207,127,224,0),22 }, + { IPv4(207,127,231,0),24 }, + { IPv4(207,127,237,0),24 }, + { IPv4(207,127,238,0),24 }, + { IPv4(207,127,239,0),24 }, + { IPv4(207,128,0,0),14 }, + { IPv4(207,132,0,0),19 }, + { IPv4(207,132,32,0),19 }, + { IPv4(207,132,37,0),24 }, + { IPv4(207,132,38,0),24 }, + { IPv4(207,132,64,0),18 }, + { IPv4(207,132,72,0),24 }, + { IPv4(207,132,82,0),24 }, + { IPv4(207,132,89,0),24 }, + { IPv4(207,132,98,0),24 }, + { IPv4(207,132,99,0),24 }, + { IPv4(207,132,102,0),24 }, + { IPv4(207,132,106,0),24 }, + { IPv4(207,132,128,0),17 }, + { IPv4(207,132,136,0),22 }, + { IPv4(207,132,144,0),22 }, + { IPv4(207,132,152,0),22 }, + { IPv4(207,132,156,0),22 }, + { IPv4(207,132,164,0),22 }, + { IPv4(207,132,168,0),22 }, + { IPv4(207,132,226,0),24 }, + { IPv4(207,132,227,0),24 }, + { IPv4(207,132,228,0),22 }, + { IPv4(207,132,230,0),24 }, + { IPv4(207,132,236,0),24 }, + { IPv4(207,132,237,0),24 }, + { IPv4(207,132,238,0),24 }, + { IPv4(207,132,239,0),24 }, + { IPv4(207,132,254,0),24 }, + { IPv4(207,133,0,0),16 }, + { IPv4(207,133,87,0),24 }, + { IPv4(207,133,93,0),24 }, + { IPv4(207,133,121,0),24 }, + { IPv4(207,133,122,0),24 }, + { IPv4(207,133,141,0),24 }, + { IPv4(207,133,142,0),24 }, + { IPv4(207,133,152,0),24 }, + { IPv4(207,133,153,0),24 }, + { IPv4(207,133,179,0),24 }, + { IPv4(207,133,186,0),24 }, + { IPv4(207,133,191,0),24 }, + { IPv4(207,133,211,0),24 }, + { IPv4(207,133,224,0),24 }, + { IPv4(207,133,225,0),24 }, + { IPv4(207,133,226,0),24 }, + { IPv4(207,133,227,0),24 }, + { IPv4(207,133,228,0),24 }, 
+ { IPv4(207,133,229,0),24 }, + { IPv4(207,133,230,0),24 }, + { IPv4(207,133,237,0),24 }, + { IPv4(207,133,238,0),24 }, + { IPv4(207,133,239,0),24 }, + { IPv4(207,135,64,0),18 }, + { IPv4(207,135,128,0),19 }, + { IPv4(207,136,128,0),19 }, + { IPv4(207,136,192,0),18 }, + { IPv4(207,137,0,0),16 }, + { IPv4(207,137,0,0),20 }, + { IPv4(207,137,52,0),24 }, + { IPv4(207,137,53,0),24 }, + { IPv4(207,137,54,0),24 }, + { IPv4(207,137,103,0),24 }, + { IPv4(207,137,184,0),22 }, + { IPv4(207,140,0,0),15 }, + { IPv4(207,140,30,0),23 }, + { IPv4(207,140,66,0),24 }, + { IPv4(207,140,80,0),24 }, + { IPv4(207,140,140,0),24 }, + { IPv4(207,140,149,0),24 }, + { IPv4(207,140,191,0),24 }, + { IPv4(207,140,224,0),21 }, + { IPv4(207,140,250,0),24 }, + { IPv4(207,141,37,0),24 }, + { IPv4(207,141,56,0),22 }, + { IPv4(207,141,150,0),24 }, + { IPv4(207,144,0,0),16 }, + { IPv4(207,148,192,0),19 }, + { IPv4(207,149,14,0),24 }, + { IPv4(207,149,47,0),24 }, + { IPv4(207,149,51,0),24 }, + { IPv4(207,149,52,0),22 }, + { IPv4(207,149,81,0),24 }, + { IPv4(207,149,113,0),24 }, + { IPv4(207,149,115,0),24 }, + { IPv4(207,149,192,0),22 }, + { IPv4(207,149,230,0),23 }, + { IPv4(207,150,0,0),17 }, + { IPv4(207,150,128,0),19 }, + { IPv4(207,150,224,0),20 }, + { IPv4(207,151,146,0),23 }, + { IPv4(207,151,152,0),21 }, + { IPv4(207,151,160,0),21 }, + { IPv4(207,152,64,0),18 }, + { IPv4(207,152,128,0),19 }, + { IPv4(207,152,128,0),18 }, + { IPv4(207,152,152,0),24 }, + { IPv4(207,152,166,0),24 }, + { IPv4(207,152,168,0),24 }, + { IPv4(207,152,169,0),24 }, + { IPv4(207,152,170,0),24 }, + { IPv4(207,152,171,0),24 }, + { IPv4(207,152,172,0),24 }, + { IPv4(207,153,0,0),18 }, + { IPv4(207,153,64,0),18 }, + { IPv4(207,153,67,0),24 }, + { IPv4(207,153,68,0),24 }, + { IPv4(207,153,90,0),24 }, + { IPv4(207,153,92,0),24 }, + { IPv4(207,153,93,0),24 }, + { IPv4(207,153,104,0),24 }, + { IPv4(207,153,107,0),24 }, + { IPv4(207,153,108,0),24 }, + { IPv4(207,153,110,0),24 }, + { IPv4(207,153,111,0),24 }, + { IPv4(207,153,112,0),24 }, + { IPv4(207,153,115,0),24 }, + { IPv4(207,153,122,0),24 }, + { IPv4(207,153,123,0),24 }, + { IPv4(207,153,127,0),24 }, + { IPv4(207,153,128,0),18 }, + { IPv4(207,153,192,0),18 }, + { IPv4(207,154,0,0),18 }, + { IPv4(207,155,0,0),16 }, + { IPv4(207,155,0,0),17 }, + { IPv4(207,155,128,0),17 }, + { IPv4(207,155,140,0),22 }, + { IPv4(207,156,128,0),17 }, + { IPv4(207,157,128,0),17 }, + { IPv4(207,158,0,0),18 }, + { IPv4(207,158,192,0),18 }, + { IPv4(207,159,0,0),18 }, + { IPv4(207,159,192,0),18 }, + { IPv4(207,160,0,0),17 }, + { IPv4(207,160,128,0),18 }, + { IPv4(207,160,192,0),19 }, + { IPv4(207,160,224,0),19 }, + { IPv4(207,161,6,0),24 }, + { IPv4(207,161,67,0),24 }, + { IPv4(207,161,84,0),24 }, + { IPv4(207,161,104,0),24 }, + { IPv4(207,161,106,0),24 }, + { IPv4(207,161,108,0),24 }, + { IPv4(207,161,149,0),24 }, + { IPv4(207,161,150,0),24 }, + { IPv4(207,161,188,0),22 }, + { IPv4(207,161,224,0),21 }, + { IPv4(207,161,241,0),24 }, + { IPv4(207,165,0,0),16 }, + { IPv4(207,167,130,0),23 }, + { IPv4(207,168,128,0),19 }, + { IPv4(207,170,128,0),18 }, + { IPv4(207,170,192,0),21 }, + { IPv4(207,170,200,0),21 }, + { IPv4(207,170,208,0),21 }, + { IPv4(207,170,216,0),21 }, + { IPv4(207,170,216,0),23 }, + { IPv4(207,170,218,0),24 }, + { IPv4(207,170,222,0),24 }, + { IPv4(207,170,224,0),21 }, + { IPv4(207,170,227,0),24 }, + { IPv4(207,170,228,0),24 }, + { IPv4(207,170,229,0),24 }, + { IPv4(207,170,230,0),23 }, + { IPv4(207,170,230,0),24 }, + { IPv4(207,170,232,0),21 }, + { IPv4(207,170,232,0),24 }, + { IPv4(207,170,237,0),24 }, + { 
IPv4(207,170,239,0),24 }, + { IPv4(207,170,240,0),21 }, + { IPv4(207,170,243,0),24 }, + { IPv4(207,170,248,0),21 }, + { IPv4(207,171,64,0),18 }, + { IPv4(207,171,160,0),20 }, + { IPv4(207,171,176,0),20 }, + { IPv4(207,172,0,0),16 }, + { IPv4(207,173,128,0),20 }, + { IPv4(207,173,224,0),24 }, + { IPv4(207,173,229,0),24 }, + { IPv4(207,173,230,0),24 }, + { IPv4(207,174,0,0),17 }, + { IPv4(207,174,6,0),24 }, + { IPv4(207,174,69,0),24 }, + { IPv4(207,174,72,0),24 }, + { IPv4(207,174,103,0),24 }, + { IPv4(207,174,128,0),18 }, + { IPv4(207,174,134,0),24 }, + { IPv4(207,174,137,0),24 }, + { IPv4(207,174,138,0),24 }, + { IPv4(207,174,140,0),24 }, + { IPv4(207,174,142,0),24 }, + { IPv4(207,174,143,0),24 }, + { IPv4(207,174,157,0),24 }, + { IPv4(207,174,172,0),24 }, + { IPv4(207,174,175,0),24 }, + { IPv4(207,174,178,0),24 }, + { IPv4(207,174,183,0),24 }, + { IPv4(207,174,184,0),24 }, + { IPv4(207,174,192,0),18 }, + { IPv4(207,174,201,0),24 }, + { IPv4(207,174,203,0),24 }, + { IPv4(207,174,204,0),24 }, + { IPv4(207,175,132,0),24 }, + { IPv4(207,175,138,0),23 }, + { IPv4(207,175,140,0),23 }, + { IPv4(207,175,157,0),24 }, + { IPv4(207,175,200,0),23 }, + { IPv4(207,175,209,0),24 }, + { IPv4(207,175,214,0),23 }, + { IPv4(207,175,216,0),22 }, + { IPv4(207,175,220,0),23 }, + { IPv4(207,176,8,0),21 }, + { IPv4(207,177,0,0),17 }, + { IPv4(207,178,42,0),23 }, + { IPv4(207,178,156,0),22 }, + { IPv4(207,179,70,0),24 }, + { IPv4(207,179,71,0),24 }, + { IPv4(207,179,72,0),24 }, + { IPv4(207,179,73,0),24 }, + { IPv4(207,179,74,0),24 }, + { IPv4(207,179,75,0),24 }, + { IPv4(207,179,76,0),24 }, + { IPv4(207,179,91,0),24 }, + { IPv4(207,179,96,0),20 }, + { IPv4(207,179,96,0),24 }, + { IPv4(207,179,174,0),24 }, + { IPv4(207,180,64,0),19 }, + { IPv4(207,180,124,0),24 }, + { IPv4(207,180,128,0),18 }, + { IPv4(207,180,192,0),18 }, + { IPv4(207,180,206,0),24 }, + { IPv4(207,181,64,0),19 }, + { IPv4(207,181,64,0),18 }, + { IPv4(207,181,69,0),24 }, + { IPv4(207,181,70,0),24 }, + { IPv4(207,181,76,0),22 }, + { IPv4(207,181,80,0),20 }, + { IPv4(207,181,96,0),19 }, + { IPv4(207,181,96,0),21 }, + { IPv4(207,181,104,0),22 }, + { IPv4(207,181,108,0),23 }, + { IPv4(207,181,112,0),20 }, + { IPv4(207,181,113,0),24 }, + { IPv4(207,181,114,0),24 }, + { IPv4(207,181,118,0),24 }, + { IPv4(207,181,126,0),24 }, + { IPv4(207,181,192,0),18 }, + { IPv4(207,182,101,0),24 }, + { IPv4(207,182,103,0),24 }, + { IPv4(207,182,114,0),24 }, + { IPv4(207,182,116,0),24 }, + { IPv4(207,182,118,0),24 }, + { IPv4(207,182,119,0),24 }, + { IPv4(207,182,120,0),24 }, + { IPv4(207,182,123,0),24 }, + { IPv4(207,182,160,0),19 }, + { IPv4(207,182,192,0),24 }, + { IPv4(207,182,207,0),24 }, + { IPv4(207,182,224,0),19 }, + { IPv4(207,183,32,0),19 }, + { IPv4(207,183,32,0),24 }, + { IPv4(207,183,96,0),20 }, + { IPv4(207,183,116,0),24 }, + { IPv4(207,183,117,0),24 }, + { IPv4(207,188,0,0),19 }, + { IPv4(207,188,192,0),19 }, + { IPv4(207,189,64,0),19 }, + { IPv4(207,189,143,0),24 }, + { IPv4(207,189,192,0),19 }, + { IPv4(207,190,128,0),19 }, + { IPv4(207,192,192,0),19 }, + { IPv4(207,196,0,0),17 }, + { IPv4(207,196,28,0),24 }, + { IPv4(207,196,81,0),24 }, + { IPv4(207,197,128,0),17 }, + { IPv4(207,198,11,0),24 }, + { IPv4(207,198,12,0),23 }, + { IPv4(207,198,14,0),24 }, + { IPv4(207,198,16,0),22 }, + { IPv4(207,198,21,0),24 }, + { IPv4(207,198,22,0),23 }, + { IPv4(207,198,39,0),24 }, + { IPv4(207,198,40,0),22 }, + { IPv4(207,198,44,0),23 }, + { IPv4(207,198,128,0),17 }, + { IPv4(207,199,0,0),17 }, + { IPv4(207,199,33,0),24 }, + { IPv4(207,199,128,0),18 }, + { 
IPv4(207,199,252,0),23 }, + { IPv4(207,200,64,0),19 }, + { IPv4(207,200,132,0),22 }, + { IPv4(207,200,136,0),22 }, + { IPv4(207,200,142,0),24 }, + { IPv4(207,201,61,0),24 }, + { IPv4(207,201,62,0),24 }, + { IPv4(207,201,128,0),18 }, + { IPv4(207,201,192,0),18 }, + { IPv4(207,202,0,0),17 }, + { IPv4(207,204,8,0),21 }, + { IPv4(207,204,166,0),24 }, + { IPv4(207,204,168,0),24 }, + { IPv4(207,204,169,0),24 }, + { IPv4(207,204,170,0),24 }, + { IPv4(207,204,192,0),20 }, + { IPv4(207,204,194,0),24 }, + { IPv4(207,204,195,0),24 }, + { IPv4(207,204,198,0),24 }, + { IPv4(207,204,202,0),24 }, + { IPv4(207,204,222,0),24 }, + { IPv4(207,204,248,0),24 }, + { IPv4(207,206,0,0),17 }, + { IPv4(207,207,0,0),18 }, + { IPv4(207,207,128,0),18 }, + { IPv4(207,207,160,0),21 }, + { IPv4(207,207,192,0),18 }, + { IPv4(207,208,0,0),16 }, + { IPv4(207,208,32,0),24 }, + { IPv4(207,209,17,0),24 }, + { IPv4(207,209,40,0),24 }, + { IPv4(207,209,88,0),24 }, + { IPv4(207,209,96,0),24 }, + { IPv4(207,209,98,0),24 }, + { IPv4(207,209,181,0),24 }, + { IPv4(207,209,182,0),24 }, + { IPv4(207,209,250,0),24 }, + { IPv4(207,211,0,0),16 }, + { IPv4(207,211,1,0),24 }, + { IPv4(207,211,2,0),24 }, + { IPv4(207,211,4,0),24 }, + { IPv4(207,211,22,0),23 }, + { IPv4(207,211,35,0),24 }, + { IPv4(207,211,36,0),24 }, + { IPv4(207,211,79,0),24 }, + { IPv4(207,211,80,0),20 }, + { IPv4(207,211,107,0),24 }, + { IPv4(207,211,150,0),24 }, + { IPv4(207,211,152,0),24 }, + { IPv4(207,211,160,0),24 }, + { IPv4(207,211,188,0),24 }, + { IPv4(207,211,208,0),21 }, + { IPv4(207,211,220,0),24 }, + { IPv4(207,211,221,0),24 }, + { IPv4(207,211,222,0),24 }, + { IPv4(207,211,223,0),24 }, + { IPv4(207,211,228,0),23 }, + { IPv4(207,211,230,0),24 }, + { IPv4(207,211,243,0),24 }, + { IPv4(207,211,248,0),24 }, + { IPv4(207,212,3,0),24 }, + { IPv4(207,212,112,0),21 }, + { IPv4(207,212,169,0),24 }, + { IPv4(207,213,32,0),23 }, + { IPv4(207,213,160,0),20 }, + { IPv4(207,213,246,0),24 }, + { IPv4(207,214,60,0),23 }, + { IPv4(207,217,0,0),16 }, + { IPv4(207,217,15,0),24 }, + { IPv4(207,218,46,0),24 }, + { IPv4(207,220,0,0),14 }, + { IPv4(207,220,23,0),24 }, + { IPv4(207,221,18,0),24 }, + { IPv4(207,221,22,0),23 }, + { IPv4(207,222,18,0),23 }, + { IPv4(207,222,44,0),24 }, + { IPv4(207,222,45,0),24 }, + { IPv4(207,222,46,0),24 }, + { IPv4(207,222,83,0),24 }, + { IPv4(207,222,124,0),24 }, + { IPv4(207,222,159,0),24 }, + { IPv4(207,222,161,0),24 }, + { IPv4(207,222,169,0),24 }, + { IPv4(207,222,175,0),24 }, + { IPv4(207,223,98,0),24 }, + { IPv4(207,223,154,0),24 }, + { IPv4(207,223,160,0),24 }, + { IPv4(207,223,208,0),24 }, + { IPv4(207,224,223,0),24 }, + { IPv4(207,225,253,0),24 }, + { IPv4(207,226,140,0),22 }, + { IPv4(207,226,160,0),19 }, + { IPv4(207,227,24,0),21 }, + { IPv4(207,227,81,0),24 }, + { IPv4(207,227,96,0),21 }, + { IPv4(207,227,192,0),21 }, + { IPv4(207,227,204,0),22 }, + { IPv4(207,227,204,0),24 }, + { IPv4(207,227,206,0),24 }, + { IPv4(207,228,224,0),19 }, + { IPv4(207,229,64,0),18 }, + { IPv4(207,229,71,0),24 }, + { IPv4(207,229,72,0),22 }, + { IPv4(207,229,128,0),18 }, + { IPv4(207,229,188,0),22 }, + { IPv4(207,229,192,0),18 }, + { IPv4(207,230,20,0),22 }, + { IPv4(207,230,24,0),22 }, + { IPv4(207,230,32,0),19 }, + { IPv4(207,230,56,0),24 }, + { IPv4(207,230,141,0),24 }, + { IPv4(207,230,143,0),24 }, + { IPv4(207,230,144,0),24 }, + { IPv4(207,230,148,0),24 }, + { IPv4(207,230,150,0),24 }, + { IPv4(207,230,151,0),24 }, + { IPv4(207,230,152,0),24 }, + { IPv4(207,230,153,0),24 }, + { IPv4(207,230,154,0),24 }, + { IPv4(207,230,155,0),24 }, + { 
IPv4(207,230,156,0),24 }, + { IPv4(207,230,157,0),24 }, + { IPv4(207,230,158,0),24 }, + { IPv4(207,230,159,0),24 }, + { IPv4(207,230,160,0),19 }, + { IPv4(207,230,224,0),19 }, + { IPv4(207,231,96,0),19 }, + { IPv4(207,231,128,0),19 }, + { IPv4(207,232,128,0),17 }, + { IPv4(207,233,0,0),17 }, + { IPv4(207,234,128,0),17 }, + { IPv4(207,234,138,0),23 }, + { IPv4(207,234,140,0),23 }, + { IPv4(207,234,142,0),24 }, + { IPv4(207,234,163,0),24 }, + { IPv4(207,234,169,0),24 }, + { IPv4(207,234,171,0),24 }, + { IPv4(207,234,173,0),24 }, + { IPv4(207,234,174,0),23 }, + { IPv4(207,234,176,0),24 }, + { IPv4(207,234,178,0),23 }, + { IPv4(207,234,180,0),24 }, + { IPv4(207,234,199,0),24 }, + { IPv4(207,234,223,0),24 }, + { IPv4(207,234,232,0),23 }, + { IPv4(207,234,248,0),24 }, + { IPv4(207,234,252,0),24 }, + { IPv4(207,235,4,0),22 }, + { IPv4(207,235,16,0),23 }, + { IPv4(207,237,0,0),16 }, + { IPv4(207,239,118,0),24 }, + { IPv4(207,239,150,0),24 }, + { IPv4(207,239,204,0),22 }, + { IPv4(207,239,212,0),22 }, + { IPv4(207,239,220,0),23 }, + { IPv4(207,239,220,0),22 }, + { IPv4(207,239,222,0),23 }, + { IPv4(207,239,226,0),24 }, + { IPv4(207,239,230,0),24 }, + { IPv4(207,239,240,0),21 }, + { IPv4(207,241,0,0),17 }, + { IPv4(207,241,20,0),24 }, + { IPv4(207,241,160,0),19 }, + { IPv4(207,241,192,0),24 }, + { IPv4(207,241,193,0),24 }, + { IPv4(207,241,194,0),24 }, + { IPv4(207,241,195,0),24 }, + { IPv4(207,241,196,0),24 }, + { IPv4(207,241,198,0),24 }, + { IPv4(207,241,200,0),24 }, + { IPv4(207,241,201,0),24 }, + { IPv4(207,241,202,0),24 }, + { IPv4(207,241,203,0),24 }, + { IPv4(207,241,204,0),24 }, + { IPv4(207,241,205,0),24 }, + { IPv4(207,241,206,0),24 }, + { IPv4(207,241,207,0),24 }, + { IPv4(207,241,240,0),20 }, + { IPv4(207,242,0,0),15 }, + { IPv4(207,242,16,0),20 }, + { IPv4(207,242,205,0),24 }, + { IPv4(207,242,244,0),24 }, + { IPv4(207,243,23,0),24 }, + { IPv4(207,243,58,0),24 }, + { IPv4(207,243,59,0),24 }, + { IPv4(207,243,120,0),22 }, + { IPv4(207,243,145,0),24 }, + { IPv4(207,243,146,0),24 }, + { IPv4(207,243,245,0),24 }, + { IPv4(207,244,0,0),24 }, + { IPv4(207,244,0,0),18 }, + { IPv4(207,244,1,0),24 }, + { IPv4(207,244,2,0),24 }, + { IPv4(207,244,5,0),24 }, + { IPv4(207,244,7,0),24 }, + { IPv4(207,244,8,0),24 }, + { IPv4(207,244,10,0),24 }, + { IPv4(207,244,11,0),24 }, + { IPv4(207,244,12,0),24 }, + { IPv4(207,244,13,0),24 }, + { IPv4(207,244,14,0),24 }, + { IPv4(207,244,15,0),24 }, + { IPv4(207,244,18,0),24 }, + { IPv4(207,244,20,0),24 }, + { IPv4(207,244,23,0),24 }, + { IPv4(207,244,25,0),24 }, + { IPv4(207,244,32,0),24 }, + { IPv4(207,244,33,0),24 }, + { IPv4(207,244,34,0),24 }, + { IPv4(207,244,35,0),24 }, + { IPv4(207,244,36,0),24 }, + { IPv4(207,244,37,0),24 }, + { IPv4(207,244,38,0),24 }, + { IPv4(207,244,39,0),24 }, + { IPv4(207,244,40,0),24 }, + { IPv4(207,244,42,0),24 }, + { IPv4(207,244,46,0),24 }, + { IPv4(207,244,47,0),24 }, + { IPv4(207,244,49,0),24 }, + { IPv4(207,244,56,0),24 }, + { IPv4(207,244,57,0),24 }, + { IPv4(207,244,58,0),24 }, + { IPv4(207,244,59,0),24 }, + { IPv4(207,244,60,0),24 }, + { IPv4(207,244,61,0),24 }, + { IPv4(207,244,62,0),24 }, + { IPv4(207,244,63,0),24 }, + { IPv4(207,245,0,0),18 }, + { IPv4(207,245,64,0),18 }, + { IPv4(207,245,136,0),24 }, + { IPv4(207,245,138,0),24 }, + { IPv4(207,245,192,0),18 }, + { IPv4(207,245,216,0),24 }, + { IPv4(207,245,243,0),24 }, + { IPv4(207,245,244,0),23 }, + { IPv4(207,246,0,0),18 }, + { IPv4(207,246,0,0),19 }, + { IPv4(207,246,64,0),18 }, + { IPv4(207,246,160,0),19 }, + { IPv4(207,246,208,0),24 }, + { IPv4(207,246,209,0),24 
}, + { IPv4(207,246,224,0),19 }, + { IPv4(207,248,0,0),19 }, + { IPv4(207,248,40,0),21 }, + { IPv4(207,248,56,0),21 }, + { IPv4(207,248,66,0),24 }, + { IPv4(207,248,88,0),22 }, + { IPv4(207,248,88,0),24 }, + { IPv4(207,248,89,0),24 }, + { IPv4(207,248,90,0),24 }, + { IPv4(207,248,91,0),24 }, + { IPv4(207,248,96,0),24 }, + { IPv4(207,248,118,0),24 }, + { IPv4(207,248,128,0),19 }, + { IPv4(207,248,144,0),24 }, + { IPv4(207,248,178,0),24 }, + { IPv4(207,248,197,0),24 }, + { IPv4(207,248,224,0),20 }, + { IPv4(207,248,224,0),19 }, + { IPv4(207,248,240,0),20 }, + { IPv4(207,249,32,0),19 }, + { IPv4(207,249,64,0),19 }, + { IPv4(207,250,0,0),17 }, + { IPv4(207,250,13,0),24 }, + { IPv4(207,250,49,0),24 }, + { IPv4(207,250,74,0),24 }, + { IPv4(207,250,128,0),17 }, + { IPv4(207,250,143,0),24 }, + { IPv4(207,250,163,0),24 }, + { IPv4(207,250,166,0),24 }, + { IPv4(207,250,169,0),24 }, + { IPv4(207,250,177,0),24 }, + { IPv4(207,250,185,0),24 }, + { IPv4(207,250,191,0),24 }, + { IPv4(207,250,209,0),24 }, + { IPv4(207,251,112,0),23 }, + { IPv4(207,251,120,0),23 }, + { IPv4(207,252,0,0),16 }, + { IPv4(207,252,0,0),22 }, + { IPv4(207,252,5,0),24 }, + { IPv4(207,252,20,0),23 }, + { IPv4(207,252,22,0),24 }, + { IPv4(207,252,28,0),24 }, + { IPv4(207,252,60,0),22 }, + { IPv4(207,252,72,0),21 }, + { IPv4(207,252,96,0),24 }, + { IPv4(207,252,104,0),23 }, + { IPv4(207,252,121,0),24 }, + { IPv4(207,252,127,0),24 }, + { IPv4(207,252,144,0),22 }, + { IPv4(207,252,152,0),21 }, + { IPv4(207,252,152,0),24 }, + { IPv4(207,252,153,0),24 }, + { IPv4(207,252,154,0),24 }, + { IPv4(207,252,155,0),24 }, + { IPv4(207,252,156,0),24 }, + { IPv4(207,252,157,0),24 }, + { IPv4(207,252,158,0),24 }, + { IPv4(207,252,159,0),24 }, + { IPv4(207,252,161,0),24 }, + { IPv4(207,252,165,0),24 }, + { IPv4(207,252,196,0),22 }, + { IPv4(207,252,208,0),21 }, + { IPv4(207,253,0,0),16 }, + { IPv4(207,254,0,0),17 }, + { IPv4(207,254,112,0),20 }, + { IPv4(208,0,8,0),24 }, + { IPv4(208,0,32,0),20 }, + { IPv4(208,0,48,0),24 }, + { IPv4(208,0,80,0),20 }, + { IPv4(208,0,96,0),21 }, + { IPv4(208,0,192,0),20 }, + { IPv4(208,1,7,0),24 }, + { IPv4(208,1,56,0),23 }, + { IPv4(208,1,127,0),24 }, + { IPv4(208,1,140,0),22 }, + { IPv4(208,1,190,0),24 }, + { IPv4(208,1,224,0),24 }, + { IPv4(208,1,232,0),24 }, + { IPv4(208,2,96,0),20 }, + { IPv4(208,2,153,0),24 }, + { IPv4(208,2,182,0),24 }, + { IPv4(208,2,204,0),24 }, + { IPv4(208,2,250,0),23 }, + { IPv4(208,3,32,0),24 }, + { IPv4(208,3,34,0),23 }, + { IPv4(208,3,38,0),24 }, + { IPv4(208,3,45,0),24 }, + { IPv4(208,3,134,0),24 }, + { IPv4(208,3,164,0),22 }, + { IPv4(208,3,212,0),24 }, + { IPv4(208,3,248,0),22 }, + { IPv4(208,4,8,0),23 }, + { IPv4(208,4,48,0),24 }, + { IPv4(208,4,64,0),23 }, + { IPv4(208,4,179,0),24 }, + { IPv4(208,4,240,0),20 }, + { IPv4(208,5,24,0),21 }, + { IPv4(208,5,34,0),24 }, + { IPv4(208,5,35,0),24 }, + { IPv4(208,5,36,0),24 }, + { IPv4(208,5,48,0),22 }, + { IPv4(208,5,176,0),20 }, + { IPv4(208,5,192,0),24 }, + { IPv4(208,5,235,0),24 }, + { IPv4(208,6,68,0),22 }, + { IPv4(208,6,128,0),19 }, + { IPv4(208,7,62,0),24 }, + { IPv4(208,7,208,0),22 }, + { IPv4(208,8,16,0),21 }, + { IPv4(208,8,49,0),24 }, + { IPv4(208,8,80,0),24 }, + { IPv4(208,9,194,0),24 }, + { IPv4(208,9,196,0),24 }, + { IPv4(208,9,212,0),24 }, + { IPv4(208,10,25,0),24 }, + { IPv4(208,10,235,0),24 }, + { IPv4(208,10,236,0),23 }, + { IPv4(208,10,246,0),23 }, + { IPv4(208,11,24,0),21 }, + { IPv4(208,11,224,0),24 }, + { IPv4(208,12,0,0),19 }, + { IPv4(208,12,32,0),19 }, + { IPv4(208,12,104,0),24 }, + { IPv4(208,12,166,0),24 }, + { 
IPv4(208,12,168,0),22 }, + { IPv4(208,13,0,0),19 }, + { IPv4(208,13,36,0),23 }, + { IPv4(208,13,96,0),19 }, + { IPv4(208,13,146,0),24 }, + { IPv4(208,13,157,0),24 }, + { IPv4(208,13,174,0),24 }, + { IPv4(208,14,219,0),24 }, + { IPv4(208,15,192,0),19 }, + { IPv4(208,16,208,0),21 }, + { IPv4(208,17,9,0),24 }, + { IPv4(208,17,12,0),23 }, + { IPv4(208,17,184,0),24 }, + { IPv4(208,17,185,0),24 }, + { IPv4(208,17,215,0),24 }, + { IPv4(208,18,52,0),24 }, + { IPv4(208,18,53,0),24 }, + { IPv4(208,19,81,0),24 }, + { IPv4(208,19,140,0),23 }, + { IPv4(208,19,155,0),24 }, + { IPv4(208,20,148,0),24 }, + { IPv4(208,20,151,0),24 }, + { IPv4(208,20,232,0),24 }, + { IPv4(208,20,236,0),24 }, + { IPv4(208,21,22,0),24 }, + { IPv4(208,21,24,0),23 }, + { IPv4(208,21,42,0),24 }, + { IPv4(208,21,100,0),22 }, + { IPv4(208,21,104,0),21 }, + { IPv4(208,22,59,0),24 }, + { IPv4(208,22,176,0),24 }, + { IPv4(208,23,32,0),19 }, + { IPv4(208,23,167,0),24 }, + { IPv4(208,23,200,0),21 }, + { IPv4(208,24,134,128),25 }, + { IPv4(208,24,136,0),23 }, + { IPv4(208,25,12,0),24 }, + { IPv4(208,25,64,0),19 }, + { IPv4(208,25,98,0),24 }, + { IPv4(208,25,168,0),24 }, + { IPv4(208,25,212,0),24 }, + { IPv4(208,25,244,0),24 }, + { IPv4(208,25,251,0),24 }, + { IPv4(208,25,252,0),23 }, + { IPv4(208,26,8,0),21 }, + { IPv4(208,26,98,0),24 }, + { IPv4(208,26,152,0),24 }, + { IPv4(208,27,20,0),22 }, + { IPv4(208,27,24,0),22 }, + { IPv4(208,27,56,0),24 }, + { IPv4(208,27,86,0),24 }, + { IPv4(208,27,132,0),24 }, + { IPv4(208,27,190,0),24 }, + { IPv4(208,27,191,0),24 }, + { IPv4(208,27,200,0),24 }, + { IPv4(208,28,64,0),24 }, + { IPv4(208,28,112,0),24 }, + { IPv4(208,29,28,0),22 }, + { IPv4(208,29,218,0),24 }, + { IPv4(208,30,64,0),21 }, + { IPv4(208,30,129,0),24 }, + { IPv4(208,30,132,0),22 }, + { IPv4(208,30,152,0),21 }, + { IPv4(208,31,144,0),24 }, + { IPv4(208,31,149,0),24 }, + { IPv4(208,31,156,0),23 }, + { IPv4(208,32,65,0),24 }, + { IPv4(208,32,67,0),24 }, + { IPv4(208,32,226,0),24 }, + { IPv4(208,33,36,0),24 }, + { IPv4(208,33,38,0),23 }, + { IPv4(208,33,40,0),24 }, + { IPv4(208,33,92,0),24 }, + { IPv4(208,33,182,0),24 }, + { IPv4(208,33,216,0),22 }, + { IPv4(208,34,8,0),21 }, + { IPv4(208,34,42,0),24 }, + { IPv4(208,34,62,0),24 }, + { IPv4(208,34,80,0),21 }, + { IPv4(208,34,96,0),20 }, + { IPv4(208,34,241,0),24 }, + { IPv4(208,35,201,0),24 }, + { IPv4(208,36,0,0),15 }, + { IPv4(208,36,9,0),24 }, + { IPv4(208,36,10,0),24 }, + { IPv4(208,36,49,0),24 }, + { IPv4(208,36,50,0),23 }, + { IPv4(208,36,53,0),24 }, + { IPv4(208,36,54,0),23 }, + { IPv4(208,36,96,0),23 }, + { IPv4(208,36,99,0),24 }, + { IPv4(208,36,100,0),24 }, + { IPv4(208,36,102,0),24 }, + { IPv4(208,36,103,0),24 }, + { IPv4(208,36,115,0),24 }, + { IPv4(208,36,144,0),23 }, + { IPv4(208,36,148,0),24 }, + { IPv4(208,36,176,0),24 }, + { IPv4(208,36,177,0),24 }, + { IPv4(208,36,178,0),24 }, + { IPv4(208,36,179,0),24 }, + { IPv4(208,36,200,0),21 }, + { IPv4(208,36,212,0),22 }, + { IPv4(208,36,217,0),24 }, + { IPv4(208,36,224,0),24 }, + { IPv4(208,36,230,0),24 }, + { IPv4(208,36,231,0),24 }, + { IPv4(208,37,49,0),24 }, + { IPv4(208,37,50,0),24 }, + { IPv4(208,37,80,0),23 }, + { IPv4(208,37,88,0),23 }, + { IPv4(208,37,120,0),22 }, + { IPv4(208,37,138,0),23 }, + { IPv4(208,37,148,0),22 }, + { IPv4(208,37,172,0),24 }, + { IPv4(208,37,200,0),24 }, + { IPv4(208,37,207,0),24 }, + { IPv4(208,39,20,0),22 }, + { IPv4(208,39,52,0),22 }, + { IPv4(208,39,208,0),22 }, + { IPv4(208,40,128,0),18 }, + { IPv4(208,40,192,0),20 }, + { IPv4(208,42,0,0),17 }, + { IPv4(208,42,10,0),24 }, + { 
IPv4(208,42,11,0),24 }, + { IPv4(208,42,128,0),18 }, + { IPv4(208,43,0,0),16 }, + { IPv4(208,44,45,0),24 }, + { IPv4(208,44,47,0),24 }, + { IPv4(208,44,50,0),24 }, + { IPv4(208,44,70,0),23 }, + { IPv4(208,44,72,0),21 }, + { IPv4(208,44,82,0),23 }, + { IPv4(208,44,110,0),24 }, + { IPv4(208,44,119,0),24 }, + { IPv4(208,44,140,0),22 }, + { IPv4(208,44,144,0),24 }, + { IPv4(208,44,151,0),24 }, + { IPv4(208,44,168,0),23 }, + { IPv4(208,44,212,0),24 }, + { IPv4(208,44,217,0),24 }, + { IPv4(208,44,218,0),24 }, + { IPv4(208,44,241,0),24 }, + { IPv4(208,44,252,0),24 }, + { IPv4(208,44,253,0),24 }, + { IPv4(208,45,32,0),24 }, + { IPv4(208,45,33,0),24 }, + { IPv4(208,45,36,0),24 }, + { IPv4(208,45,37,0),24 }, + { IPv4(208,45,52,0),22 }, + { IPv4(208,45,148,0),24 }, + { IPv4(208,45,171,0),24 }, + { IPv4(208,45,172,0),22 }, + { IPv4(208,45,205,0),24 }, + { IPv4(208,45,252,0),23 }, + { IPv4(208,46,49,0),24 }, + { IPv4(208,46,64,0),24 }, + { IPv4(208,46,66,0),24 }, + { IPv4(208,46,68,0),24 }, + { IPv4(208,46,69,0),24 }, + { IPv4(208,46,71,0),24 }, + { IPv4(208,46,75,0),24 }, + { IPv4(208,46,78,0),24 }, + { IPv4(208,46,79,0),24 }, + { IPv4(208,46,84,0),24 }, + { IPv4(208,46,86,0),24 }, + { IPv4(208,46,102,0),24 }, + { IPv4(208,46,169,0),24 }, + { IPv4(208,46,170,0),24 }, + { IPv4(208,46,224,0),24 }, + { IPv4(208,46,228,0),24 }, + { IPv4(208,46,229,0),24 }, + { IPv4(208,46,230,0),24 }, + { IPv4(208,46,235,0),24 }, + { IPv4(208,46,236,0),24 }, + { IPv4(208,46,237,0),24 }, + { IPv4(208,46,238,0),24 }, + { IPv4(208,47,64,0),24 }, + { IPv4(208,47,65,0),24 }, + { IPv4(208,47,66,0),24 }, + { IPv4(208,47,67,0),24 }, + { IPv4(208,47,80,0),23 }, + { IPv4(208,47,96,0),23 }, + { IPv4(208,47,128,0),21 }, + { IPv4(208,47,174,0),24 }, + { IPv4(208,47,216,0),24 }, + { IPv4(208,47,218,0),24 }, + { IPv4(208,47,219,0),24 }, + { IPv4(208,47,231,0),24 }, + { IPv4(208,47,232,0),22 }, + { IPv4(208,47,244,0),22 }, + { IPv4(208,48,140,0),22 }, + { IPv4(208,48,247,0),24 }, + { IPv4(208,49,48,0),24 }, + { IPv4(208,49,75,0),24 }, + { IPv4(208,49,147,0),24 }, + { IPv4(208,49,172,0),24 }, + { IPv4(208,50,54,0),24 }, + { IPv4(208,50,55,0),24 }, + { IPv4(208,50,100,0),24 }, + { IPv4(208,50,127,0),24 }, + { IPv4(208,50,231,0),24 }, + { IPv4(208,51,152,0),24 }, + { IPv4(208,55,0,0),16 }, + { IPv4(208,58,0,0),15 }, + { IPv4(208,58,121,0),24 }, + { IPv4(208,59,215,0),24 }, + { IPv4(208,62,114,0),24 }, + { IPv4(208,63,39,0),24 }, + { IPv4(208,63,49,0),24 }, + { IPv4(208,63,50,0),23 }, + { IPv4(208,63,57,0),24 }, + { IPv4(208,63,58,0),23 }, + { IPv4(208,128,64,0),20 }, + { IPv4(208,129,96,0),19 }, + { IPv4(208,129,168,0),22 }, + { IPv4(208,129,249,0),24 }, + { IPv4(208,129,250,0),24 }, + { IPv4(208,129,251,0),24 }, + { IPv4(208,129,252,0),24 }, + { IPv4(208,129,253,0),24 }, + { IPv4(208,130,200,0),24 }, + { IPv4(208,131,192,0),20 }, + { IPv4(208,131,208,0),22 }, + { IPv4(208,131,222,0),24 }, + { IPv4(208,132,32,0),20 }, + { IPv4(208,132,95,0),24 }, + { IPv4(208,132,160,0),19 }, + { IPv4(208,132,196,0),24 }, + { IPv4(208,132,245,0),24 }, + { IPv4(208,132,246,0),24 }, + { IPv4(208,133,5,0),24 }, + { IPv4(208,133,35,0),24 }, + { IPv4(208,133,105,0),24 }, + { IPv4(208,133,160,0),21 }, + { IPv4(208,133,168,0),22 }, + { IPv4(208,133,172,0),23 }, + { IPv4(208,133,174,0),24 }, + { IPv4(208,133,177,0),24 }, + { IPv4(208,133,193,0),24 }, + { IPv4(208,133,204,0),23 }, + { IPv4(208,133,216,0),22 }, + { IPv4(208,133,220,0),23 }, + { IPv4(208,134,16,0),21 }, + { IPv4(208,134,24,0),23 }, + { IPv4(208,134,128,0),23 }, + { IPv4(208,134,130,0),24 }, + { 
IPv4(208,134,131,0),24 }, + { IPv4(208,134,132,0),23 }, + { IPv4(208,134,134,0),24 }, + { IPv4(208,134,135,0),24 }, + { IPv4(208,134,136,0),23 }, + { IPv4(208,134,138,0),24 }, + { IPv4(208,134,144,0),22 }, + { IPv4(208,134,148,0),24 }, + { IPv4(208,134,151,0),24 }, + { IPv4(208,136,0,0),19 }, + { IPv4(208,136,102,0),24 }, + { IPv4(208,136,252,0),22 }, + { IPv4(208,137,1,0),24 }, + { IPv4(208,137,6,0),24 }, + { IPv4(208,137,12,0),24 }, + { IPv4(208,137,24,0),21 }, + { IPv4(208,137,36,0),24 }, + { IPv4(208,137,160,0),23 }, + { IPv4(208,137,183,0),24 }, + { IPv4(208,138,20,0),24 }, + { IPv4(208,138,48,0),20 }, + { IPv4(208,138,86,0),24 }, + { IPv4(208,138,204,0),22 }, + { IPv4(208,138,224,0),24 }, + { IPv4(208,138,254,0),24 }, + { IPv4(208,139,82,0),24 }, + { IPv4(208,139,138,0),23 }, + { IPv4(208,139,195,0),24 }, + { IPv4(208,140,72,0),21 }, + { IPv4(208,141,160,0),19 }, + { IPv4(208,141,228,0),24 }, + { IPv4(208,142,106,0),23 }, + { IPv4(208,142,111,0),24 }, + { IPv4(208,142,112,0),24 }, + { IPv4(208,142,113,0),24 }, + { IPv4(208,142,114,0),24 }, + { IPv4(208,142,115,0),24 }, + { IPv4(208,142,116,0),24 }, + { IPv4(208,142,117,0),24 }, + { IPv4(208,142,118,0),24 }, + { IPv4(208,142,119,0),24 }, + { IPv4(208,142,120,0),24 }, + { IPv4(208,142,121,0),24 }, + { IPv4(208,142,136,0),21 }, + { IPv4(208,142,142,0),24 }, + { IPv4(208,142,144,0),21 }, + { IPv4(208,143,22,0),24 }, + { IPv4(208,143,23,0),24 }, + { IPv4(208,143,33,0),24 }, + { IPv4(208,143,38,0),24 }, + { IPv4(208,143,104,0),24 }, + { IPv4(208,143,108,0),24 }, + { IPv4(208,143,109,0),24 }, + { IPv4(208,144,90,0),24 }, + { IPv4(208,144,112,0),21 }, + { IPv4(208,144,196,0),24 }, + { IPv4(208,144,224,0),24 }, + { IPv4(208,144,228,0),24 }, + { IPv4(208,144,230,0),24 }, + { IPv4(208,144,235,0),24 }, + { IPv4(208,145,0,0),24 }, + { IPv4(208,145,18,0),24 }, + { IPv4(208,145,120,0),24 }, + { IPv4(208,145,121,0),24 }, + { IPv4(208,145,122,0),24 }, + { IPv4(208,145,126,0),24 }, + { IPv4(208,145,127,0),24 }, + { IPv4(208,146,32,0),20 }, + { IPv4(208,146,40,0),23 }, + { IPv4(208,146,142,0),24 }, + { IPv4(208,146,208,0),22 }, + { IPv4(208,146,248,0),21 }, + { IPv4(208,147,18,0),24 }, + { IPv4(208,147,64,0),24 }, + { IPv4(208,147,88,0),21 }, + { IPv4(208,147,144,0),21 }, + { IPv4(208,147,152,0),22 }, + { IPv4(208,148,32,0),24 }, + { IPv4(208,148,34,0),24 }, + { IPv4(208,148,35,0),24 }, + { IPv4(208,148,37,0),24 }, + { IPv4(208,148,38,0),24 }, + { IPv4(208,148,39,0),24 }, + { IPv4(208,148,50,0),23 }, + { IPv4(208,148,52,0),23 }, + { IPv4(208,148,74,0),24 }, + { IPv4(208,148,76,0),22 }, + { IPv4(208,149,80,0),20 }, + { IPv4(208,149,169,0),24 }, + { IPv4(208,151,96,0),22 }, + { IPv4(208,151,96,0),19 }, + { IPv4(208,151,100,0),22 }, + { IPv4(208,151,104,0),21 }, + { IPv4(208,151,112,0),21 }, + { IPv4(208,151,120,0),23 }, + { IPv4(208,151,122,0),23 }, + { IPv4(208,151,124,0),22 }, + { IPv4(208,151,216,0),22 }, + { IPv4(208,152,0,0),22 }, + { IPv4(208,152,4,0),22 }, + { IPv4(208,152,8,0),23 }, + { IPv4(208,152,28,0),22 }, + { IPv4(208,152,32,0),19 }, + { IPv4(208,152,112,0),22 }, + { IPv4(208,152,116,0),23 }, + { IPv4(208,152,118,0),24 }, + { IPv4(208,152,120,0),21 }, + { IPv4(208,152,153,0),24 }, + { IPv4(208,152,184,0),21 }, + { IPv4(208,152,204,0),22 }, + { IPv4(208,153,32,0),21 }, + { IPv4(208,153,75,0),24 }, + { IPv4(208,153,82,0),23 }, + { IPv4(208,153,84,0),24 }, + { IPv4(208,153,136,0),24 }, + { IPv4(208,153,137,0),24 }, + { IPv4(208,153,138,0),24 }, + { IPv4(208,153,140,0),24 }, + { IPv4(208,153,141,0),24 }, + { IPv4(208,153,228,0),24 }, + { 
IPv4(208,154,0,0),22 }, + { IPv4(208,154,56,0),24 }, + { IPv4(208,154,80,0),23 }, + { IPv4(208,154,96,0),19 }, + { IPv4(208,154,154,0),24 }, + { IPv4(208,154,199,0),24 }, + { IPv4(208,155,180,0),24 }, + { IPv4(208,155,181,0),24 }, + { IPv4(208,155,182,0),24 }, + { IPv4(208,155,183,0),24 }, + { IPv4(208,155,184,0),24 }, + { IPv4(208,155,185,0),24 }, + { IPv4(208,155,186,0),24 }, + { IPv4(208,155,187,0),24 }, + { IPv4(208,155,188,0),24 }, + { IPv4(208,155,189,0),24 }, + { IPv4(208,155,190,0),24 }, + { IPv4(208,156,21,0),24 }, + { IPv4(208,156,22,0),23 }, + { IPv4(208,157,120,0),24 }, + { IPv4(208,157,122,0),23 }, + { IPv4(208,157,136,0),24 }, + { IPv4(208,157,255,0),24 }, + { IPv4(208,158,227,0),24 }, + { IPv4(208,159,4,0),23 }, + { IPv4(208,159,61,0),24 }, + { IPv4(208,159,176,0),21 }, + { IPv4(208,159,224,0),21 }, + { IPv4(208,159,232,0),21 }, + { IPv4(208,159,240,0),24 }, + { IPv4(208,159,240,0),22 }, + { IPv4(208,159,244,0),22 }, + { IPv4(208,159,245,0),24 }, + { IPv4(208,159,248,0),21 }, + { IPv4(208,160,10,0),23 }, + { IPv4(208,160,14,0),23 }, + { IPv4(208,160,32,0),20 }, + { IPv4(208,160,85,0),24 }, + { IPv4(208,160,104,0),21 }, + { IPv4(208,160,151,0),24 }, + { IPv4(208,160,218,0),24 }, + { IPv4(208,160,224,0),19 }, + { IPv4(208,161,224,0),21 }, + { IPv4(208,161,232,0),21 }, + { IPv4(208,161,240,0),21 }, + { IPv4(208,161,240,0),22 }, + { IPv4(208,161,244,0),22 }, + { IPv4(208,161,248,0),24 }, + { IPv4(208,162,36,0),22 }, + { IPv4(208,162,64,0),19 }, + { IPv4(208,162,120,0),22 }, + { IPv4(208,162,124,0),23 }, + { IPv4(208,162,126,0),24 }, + { IPv4(208,162,240,0),21 }, + { IPv4(208,162,248,0),22 }, + { IPv4(208,162,252,0),24 }, + { IPv4(208,162,254,0),24 }, + { IPv4(208,163,72,0),24 }, + { IPv4(208,163,73,0),24 }, + { IPv4(208,163,74,0),24 }, + { IPv4(208,163,75,0),24 }, + { IPv4(208,163,76,0),24 }, + { IPv4(208,163,77,0),24 }, + { IPv4(208,163,78,0),24 }, + { IPv4(208,163,79,0),24 }, + { IPv4(208,163,96,0),21 }, + { IPv4(208,163,104,0),21 }, + { IPv4(208,163,112,0),20 }, + { IPv4(208,163,164,0),24 }, + { IPv4(208,163,192,0),22 }, + { IPv4(208,163,196,0),22 }, + { IPv4(208,163,200,0),22 }, + { IPv4(208,163,204,0),22 }, + { IPv4(208,163,208,0),22 }, + { IPv4(208,163,208,0),24 }, + { IPv4(208,163,216,0),22 }, + { IPv4(208,163,224,0),22 }, + { IPv4(208,163,228,0),22 }, + { IPv4(208,163,232,0),22 }, + { IPv4(208,163,236,0),22 }, + { IPv4(208,163,240,0),21 }, + { IPv4(208,163,248,0),21 }, + { IPv4(208,164,0,0),21 }, + { IPv4(208,164,6,0),23 }, + { IPv4(208,164,8,0),21 }, + { IPv4(208,164,16,0),24 }, + { IPv4(208,164,16,0),21 }, + { IPv4(208,164,24,0),21 }, + { IPv4(208,164,232,0),24 }, + { IPv4(208,164,233,0),24 }, + { IPv4(208,164,234,0),24 }, + { IPv4(208,165,16,0),24 }, + { IPv4(208,165,32,0),21 }, + { IPv4(208,165,32,0),20 }, + { IPv4(208,165,64,0),20 }, + { IPv4(208,165,80,0),21 }, + { IPv4(208,165,88,0),21 }, + { IPv4(208,165,112,0),22 }, + { IPv4(208,165,116,0),23 }, + { IPv4(208,165,144,0),20 }, + { IPv4(208,166,64,0),21 }, + { IPv4(208,166,120,0),21 }, + { IPv4(208,166,153,0),24 }, + { IPv4(208,166,176,0),20 }, + { IPv4(208,166,235,0),24 }, + { IPv4(208,166,237,0),24 }, + { IPv4(208,167,72,0),21 }, + { IPv4(208,167,128,0),20 }, + { IPv4(208,167,184,0),22 }, + { IPv4(208,168,4,0),22 }, + { IPv4(208,168,16,0),24 }, + { IPv4(208,168,17,0),24 }, + { IPv4(208,168,18,0),24 }, + { IPv4(208,168,19,0),24 }, + { IPv4(208,168,120,0),21 }, + { IPv4(208,168,152,0),22 }, + { IPv4(208,168,174,0),23 }, + { IPv4(208,168,176,0),22 }, + { IPv4(208,168,177,0),24 }, + { IPv4(208,168,211,0),24 }, + { 
IPv4(208,168,213,0),24 }, + { IPv4(208,168,215,0),24 }, + { IPv4(208,169,0,0),24 }, + { IPv4(208,169,4,0),22 }, + { IPv4(208,169,16,0),21 }, + { IPv4(208,169,32,0),22 }, + { IPv4(208,170,96,0),20 }, + { IPv4(208,170,112,0),20 }, + { IPv4(208,170,152,0),22 }, + { IPv4(208,170,156,0),23 }, + { IPv4(208,170,168,0),21 }, + { IPv4(208,170,170,0),24 }, + { IPv4(208,170,240,0),22 }, + { IPv4(208,171,80,0),22 }, + { IPv4(208,171,120,0),21 }, + { IPv4(208,171,146,0),24 }, + { IPv4(208,171,147,0),24 }, + { IPv4(208,171,151,0),24 }, + { IPv4(208,171,152,0),24 }, + { IPv4(208,171,153,0),24 }, + { IPv4(208,171,154,0),24 }, + { IPv4(208,171,155,0),24 }, + { IPv4(208,171,157,0),24 }, + { IPv4(208,171,158,0),24 }, + { IPv4(208,171,159,0),24 }, + { IPv4(208,171,212,0),24 }, + { IPv4(208,176,0,0),15 }, + { IPv4(208,176,50,0),24 }, + { IPv4(208,176,58,0),23 }, + { IPv4(208,176,72,0),22 }, + { IPv4(208,176,80,0),20 }, + { IPv4(208,176,99,0),24 }, + { IPv4(208,176,100,0),24 }, + { IPv4(208,176,105,0),24 }, + { IPv4(208,176,106,0),23 }, + { IPv4(208,176,161,0),24 }, + { IPv4(208,176,162,0),23 }, + { IPv4(208,176,224,0),21 }, + { IPv4(208,176,232,0),22 }, + { IPv4(208,176,236,0),22 }, + { IPv4(208,177,1,0),24 }, + { IPv4(208,177,25,0),24 }, + { IPv4(208,177,27,0),24 }, + { IPv4(208,177,32,0),22 }, + { IPv4(208,177,40,0),22 }, + { IPv4(208,177,44,0),24 }, + { IPv4(208,177,67,0),24 }, + { IPv4(208,177,74,0),23 }, + { IPv4(208,177,76,0),23 }, + { IPv4(208,177,177,0),24 }, + { IPv4(208,177,192,0),21 }, + { IPv4(208,177,200,0),22 }, + { IPv4(208,177,245,0),24 }, + { IPv4(208,177,248,0),22 }, + { IPv4(208,178,52,0),23 }, + { IPv4(208,178,74,0),23 }, + { IPv4(208,178,130,0),24 }, + { IPv4(208,178,237,0),24 }, + { IPv4(208,179,0,0),16 }, + { IPv4(208,179,16,0),24 }, + { IPv4(208,179,60,0),24 }, + { IPv4(208,179,91,0),24 }, + { IPv4(208,179,193,0),24 }, + { IPv4(208,179,194,0),24 }, + { IPv4(208,180,0,0),19 }, + { IPv4(208,180,32,0),20 }, + { IPv4(208,180,48,0),21 }, + { IPv4(208,180,56,0),22 }, + { IPv4(208,180,60,0),22 }, + { IPv4(208,180,64,0),21 }, + { IPv4(208,180,72,0),24 }, + { IPv4(208,180,73,0),24 }, + { IPv4(208,180,74,0),23 }, + { IPv4(208,180,76,0),24 }, + { IPv4(208,180,77,0),24 }, + { IPv4(208,180,78,0),24 }, + { IPv4(208,180,79,0),24 }, + { IPv4(208,180,80,0),20 }, + { IPv4(208,180,96,0),20 }, + { IPv4(208,180,112,0),21 }, + { IPv4(208,180,120,0),21 }, + { IPv4(208,180,128,0),20 }, + { IPv4(208,180,144,0),21 }, + { IPv4(208,180,152,0),21 }, + { IPv4(208,180,160,0),20 }, + { IPv4(208,180,176,0),21 }, + { IPv4(208,180,184,0),22 }, + { IPv4(208,180,188,0),22 }, + { IPv4(208,180,192,0),21 }, + { IPv4(208,180,200,0),22 }, + { IPv4(208,180,204,0),22 }, + { IPv4(208,180,208,0),21 }, + { IPv4(208,180,216,0),21 }, + { IPv4(208,180,224,0),20 }, + { IPv4(208,180,240,0),21 }, + { IPv4(208,182,0,0),17 }, + { IPv4(208,182,128,0),19 }, + { IPv4(208,182,160,0),20 }, + { IPv4(208,182,176,0),22 }, + { IPv4(208,182,180,0),23 }, + { IPv4(208,182,182,0),23 }, + { IPv4(208,182,184,0),21 }, + { IPv4(208,182,192,0),18 }, + { IPv4(208,183,0,0),17 }, + { IPv4(208,183,128,0),19 }, + { IPv4(208,183,160,0),19 }, + { IPv4(208,183,192,0),19 }, + { IPv4(208,183,224,0),20 }, + { IPv4(208,183,240,0),20 }, + { IPv4(208,184,0,0),15 }, + { IPv4(208,184,7,0),24 }, + { IPv4(208,184,29,0),24 }, + { IPv4(208,184,40,0),22 }, + { IPv4(208,184,40,0),24 }, + { IPv4(208,184,41,0),24 }, + { IPv4(208,184,42,0),24 }, + { IPv4(208,184,43,0),24 }, + { IPv4(208,184,46,0),24 }, + { IPv4(208,184,153,0),24 }, + { IPv4(208,184,189,0),24 }, + { 
IPv4(208,184,190,0),23 }, + { IPv4(208,184,216,0),24 }, + { IPv4(208,184,219,0),24 }, + { IPv4(208,184,227,0),24 }, + { IPv4(208,184,252,0),22 }, + { IPv4(208,185,7,0),24 }, + { IPv4(208,185,33,0),24 }, + { IPv4(208,185,35,0),24 }, + { IPv4(208,185,43,0),24 }, + { IPv4(208,185,44,0),24 }, + { IPv4(208,185,45,0),24 }, + { IPv4(208,185,48,0),23 }, + { IPv4(208,185,74,0),24 }, + { IPv4(208,185,98,0),23 }, + { IPv4(208,185,109,0),24 }, + { IPv4(208,185,112,0),24 }, + { IPv4(208,185,114,0),23 }, + { IPv4(208,185,129,0),24 }, + { IPv4(208,185,130,0),24 }, + { IPv4(208,185,143,0),24 }, + { IPv4(208,185,169,0),24 }, + { IPv4(208,185,186,0),24 }, + { IPv4(208,185,204,0),24 }, + { IPv4(208,185,205,0),24 }, + { IPv4(208,185,220,0),24 }, + { IPv4(208,185,221,0),24 }, + { IPv4(208,185,222,0),23 }, + { IPv4(208,185,224,0),23 }, + { IPv4(208,186,104,0),21 }, + { IPv4(208,186,108,0),24 }, + { IPv4(208,186,109,0),24 }, + { IPv4(208,186,110,0),24 }, + { IPv4(208,186,111,0),24 }, + { IPv4(208,186,224,0),24 }, + { IPv4(208,187,190,0),24 }, + { IPv4(208,187,194,0),24 }, + { IPv4(208,187,214,0),23 }, + { IPv4(208,187,218,0),24 }, + { IPv4(208,187,219,0),24 }, + { IPv4(208,187,224,0),22 }, + { IPv4(208,188,184,0),21 }, + { IPv4(208,189,32,0),20 }, + { IPv4(208,189,96,0),20 }, + { IPv4(208,189,103,0),24 }, + { IPv4(208,189,208,0),21 }, + { IPv4(208,189,210,0),24 }, + { IPv4(208,189,216,0),21 }, + { IPv4(208,191,32,0),20 }, + { IPv4(208,191,48,0),20 }, + { IPv4(208,191,62,0),24 }, + { IPv4(208,191,128,0),20 }, + { IPv4(208,192,3,0),24 }, + { IPv4(208,192,14,0),24 }, + { IPv4(208,192,32,0),21 }, + { IPv4(208,192,86,0),24 }, + { IPv4(208,192,120,0),24 }, + { IPv4(208,192,208,0),22 }, + { IPv4(208,193,14,0),24 }, + { IPv4(208,193,15,0),24 }, + { IPv4(208,193,53,0),24 }, + { IPv4(208,193,120,0),21 }, + { IPv4(208,193,132,0),24 }, + { IPv4(208,194,74,0),23 }, + { IPv4(208,194,97,0),24 }, + { IPv4(208,194,157,0),24 }, + { IPv4(208,195,104,0),21 }, + { IPv4(208,195,255,0),24 }, + { IPv4(208,196,96,0),19 }, + { IPv4(208,196,168,0),24 }, + { IPv4(208,197,4,0),24 }, + { IPv4(208,197,8,0),24 }, + { IPv4(208,197,35,0),24 }, + { IPv4(208,197,70,0),24 }, + { IPv4(208,197,116,0),23 }, + { IPv4(208,197,246,0),24 }, + { IPv4(208,197,247,0),24 }, + { IPv4(208,198,0,0),22 }, + { IPv4(208,198,4,0),22 }, + { IPv4(208,198,224,0),24 }, + { IPv4(208,198,224,0),21 }, + { IPv4(208,198,225,0),24 }, + { IPv4(208,198,226,0),24 }, + { IPv4(208,198,240,0),22 }, + { IPv4(208,199,168,0),21 }, + { IPv4(208,200,136,0),21 }, + { IPv4(208,200,180,0),24 }, + { IPv4(208,200,181,0),24 }, + { IPv4(208,200,185,0),24 }, + { IPv4(208,200,214,0),24 }, + { IPv4(208,201,44,0),22 }, + { IPv4(208,201,64,0),21 }, + { IPv4(208,201,73,0),24 }, + { IPv4(208,201,108,0),24 }, + { IPv4(208,201,179,0),24 }, + { IPv4(208,201,200,0),21 }, + { IPv4(208,202,77,0),24 }, + { IPv4(208,202,104,0),24 }, + { IPv4(208,202,107,0),24 }, + { IPv4(208,202,128,0),21 }, + { IPv4(208,202,218,0),23 }, + { IPv4(208,203,47,0),24 }, + { IPv4(208,203,56,0),21 }, + { IPv4(208,203,56,0),22 }, + { IPv4(208,203,112,0),23 }, + { IPv4(208,203,201,0),24 }, + { IPv4(208,205,104,0),21 }, + { IPv4(208,205,120,0),24 }, + { IPv4(208,205,124,0),24 }, + { IPv4(208,205,125,0),24 }, + { IPv4(208,205,126,0),24 }, + { IPv4(208,205,127,0),24 }, + { IPv4(208,205,136,0),24 }, + { IPv4(208,205,144,0),20 }, + { IPv4(208,205,160,0),20 }, + { IPv4(208,205,176,0),24 }, + { IPv4(208,205,240,0),22 }, + { IPv4(208,205,244,0),22 }, + { IPv4(208,206,84,0),24 }, + { IPv4(208,206,85,0),24 }, + { IPv4(208,206,229,0),24 }, + { 
IPv4(208,206,231,0),24 }, + { IPv4(208,207,64,0),24 }, + { IPv4(208,208,91,0),24 }, + { IPv4(208,208,104,0),21 }, + { IPv4(208,209,28,0),24 }, + { IPv4(208,209,29,0),24 }, + { IPv4(208,209,30,0),24 }, + { IPv4(208,209,31,0),24 }, + { IPv4(208,209,38,0),24 }, + { IPv4(208,209,160,0),22 }, + { IPv4(208,209,210,0),24 }, + { IPv4(208,209,232,0),24 }, + { IPv4(208,209,252,0),22 }, + { IPv4(208,211,80,0),24 }, + { IPv4(208,211,104,0),21 }, + { IPv4(208,212,64,0),20 }, + { IPv4(208,213,56,0),21 }, + { IPv4(208,213,126,0),24 }, + { IPv4(208,213,144,0),20 }, + { IPv4(208,213,229,0),24 }, + { IPv4(208,214,18,0),24 }, + { IPv4(208,214,40,0),22 }, + { IPv4(208,214,216,0),21 }, + { IPv4(208,215,200,0),24 }, + { IPv4(208,215,201,0),24 }, + { IPv4(208,215,236,0),24 }, + { IPv4(208,216,39,0),24 }, + { IPv4(208,216,80,0),21 }, + { IPv4(208,216,91,0),24 }, + { IPv4(208,216,180,0),22 }, + { IPv4(208,217,21,0),24 }, + { IPv4(208,217,71,0),24 }, + { IPv4(208,217,74,0),24 }, + { IPv4(208,217,166,0),24 }, + { IPv4(208,217,167,0),24 }, + { IPv4(208,217,198,0),24 }, + { IPv4(208,217,208,0),20 }, + { IPv4(208,217,238,0),24 }, + { IPv4(208,218,122,0),24 }, + { IPv4(208,218,128,0),21 }, + { IPv4(208,218,210,0),24 }, + { IPv4(208,218,214,0),24 }, + { IPv4(208,218,215,0),24 }, + { IPv4(208,219,48,0),24 }, + { IPv4(208,219,49,0),24 }, + { IPv4(208,219,74,0),24 }, + { IPv4(208,219,112,0),20 }, + { IPv4(208,219,128,0),19 }, + { IPv4(208,219,220,0),24 }, + { IPv4(208,220,100,0),24 }, + { IPv4(208,220,180,0),24 }, + { IPv4(208,220,181,0),24 }, + { IPv4(208,220,192,0),19 }, + { IPv4(208,221,72,0),21 }, + { IPv4(208,221,192,0),24 }, + { IPv4(208,221,193,0),24 }, + { IPv4(208,221,194,0),24 }, + { IPv4(208,221,195,0),24 }, + { IPv4(208,222,120,0),21 }, + { IPv4(208,222,150,0),23 }, + { IPv4(208,222,244,0),24 }, + { IPv4(208,222,245,0),24 }, + { IPv4(208,222,252,0),24 }, + { IPv4(208,222,253,0),24 }, + { IPv4(208,223,76,0),24 }, + { IPv4(208,223,208,0),23 }, + { IPv4(208,224,87,0),24 }, + { IPv4(208,224,122,0),24 }, + { IPv4(208,224,224,0),24 }, + { IPv4(208,224,225,0),24 }, + { IPv4(208,225,40,0),24 }, + { IPv4(208,225,187,0),24 }, + { IPv4(208,225,239,0),24 }, + { IPv4(208,226,36,0),24 }, + { IPv4(208,226,37,0),24 }, + { IPv4(208,226,38,0),24 }, + { IPv4(208,226,39,0),24 }, + { IPv4(208,226,120,0),22 }, + { IPv4(208,226,130,0),23 }, + { IPv4(208,228,160,0),20 }, + { IPv4(208,229,54,0),24 }, + { IPv4(208,229,121,0),24 }, + { IPv4(208,229,240,0),24 }, + { IPv4(208,230,56,0),24 }, + { IPv4(208,230,58,0),23 }, + { IPv4(208,230,128,0),20 }, + { IPv4(208,230,194,0),24 }, + { IPv4(208,230,196,0),24 }, + { IPv4(208,230,197,0),24 }, + { IPv4(208,230,244,0),24 }, + { IPv4(208,230,250,0),24 }, + { IPv4(208,230,251,0),24 }, + { IPv4(208,231,60,0),24 }, + { IPv4(208,231,128,0),22 }, + { IPv4(208,231,162,0),24 }, + { IPv4(208,232,142,0),24 }, + { IPv4(208,232,142,0),23 }, + { IPv4(208,232,245,0),24 }, + { IPv4(208,233,88,0),21 }, + { IPv4(208,233,112,0),21 }, + { IPv4(208,233,124,0),23 }, + { IPv4(208,234,0,0),19 }, + { IPv4(208,234,120,0),22 }, + { IPv4(208,234,168,0),24 }, + { IPv4(208,234,169,0),24 }, + { IPv4(208,234,192,0),23 }, + { IPv4(208,234,218,0),23 }, + { IPv4(208,234,252,0),24 }, + { IPv4(208,236,170,0),24 }, + { IPv4(208,237,33,0),24 }, + { IPv4(208,237,56,0),22 }, + { IPv4(208,237,80,0),22 }, + { IPv4(208,237,88,0),23 }, + { IPv4(208,238,43,0),24 }, + { IPv4(208,238,44,0),24 }, + { IPv4(208,238,45,0),24 }, + { IPv4(208,238,46,0),24 }, + { IPv4(208,238,47,0),24 }, + { IPv4(208,238,126,0),23 }, + { IPv4(208,238,144,0),21 }, + { 
IPv4(208,238,228,0),24 }, + { IPv4(208,238,229,0),24 }, + { IPv4(208,238,230,0),24 }, + { IPv4(208,239,116,0),22 }, + { IPv4(208,239,159,0),24 }, + { IPv4(208,239,169,0),24 }, + { IPv4(208,239,172,0),22 }, + { IPv4(208,240,76,0),23 }, + { IPv4(208,240,128,0),21 }, + { IPv4(208,240,240,0),22 }, + { IPv4(208,240,252,0),22 }, + { IPv4(208,241,0,0),22 }, + { IPv4(208,241,48,0),22 }, + { IPv4(208,241,152,0),21 }, + { IPv4(208,241,166,0),23 }, + { IPv4(208,241,190,0),24 }, + { IPv4(208,241,191,0),24 }, + { IPv4(208,242,0,0),24 }, + { IPv4(208,242,1,0),24 }, + { IPv4(208,242,62,0),24 }, + { IPv4(208,242,63,0),24 }, + { IPv4(208,242,114,0),23 }, + { IPv4(208,243,4,0),22 }, + { IPv4(208,243,98,0),23 }, + { IPv4(208,243,100,0),22 }, + { IPv4(208,244,39,0),24 }, + { IPv4(208,244,82,0),23 }, + { IPv4(208,244,88,0),21 }, + { IPv4(208,244,111,0),24 }, + { IPv4(208,244,118,0),23 }, + { IPv4(208,244,140,0),22 }, + { IPv4(208,244,174,0),23 }, + { IPv4(208,244,246,0),23 }, + { IPv4(208,245,36,0),23 }, + { IPv4(208,245,86,0),24 }, + { IPv4(208,245,128,0),24 }, + { IPv4(208,245,132,0),22 }, + { IPv4(208,245,232,0),24 }, + { IPv4(208,245,248,0),21 }, + { IPv4(208,246,83,0),24 }, + { IPv4(208,246,134,0),24 }, + { IPv4(208,246,144,224),27 }, + { IPv4(208,246,164,0),23 }, + { IPv4(208,246,215,0),24 }, + { IPv4(208,247,17,0),24 }, + { IPv4(208,247,100,0),24 }, + { IPv4(208,247,121,0),24 }, + { IPv4(208,247,129,0),24 }, + { IPv4(208,247,208,0),24 }, + { IPv4(208,247,248,0),22 }, + { IPv4(208,248,33,0),24 }, + { IPv4(208,248,77,0),24 }, + { IPv4(208,248,108,0),23 }, + { IPv4(208,248,128,0),20 }, + { IPv4(208,248,186,0),23 }, + { IPv4(208,248,192,0),24 }, + { IPv4(208,248,193,0),24 }, + { IPv4(208,248,194,0),24 }, + { IPv4(208,248,195,0),24 }, + { IPv4(208,248,242,0),24 }, + { IPv4(208,249,36,0),24 }, + { IPv4(208,249,116,0),24 }, + { IPv4(208,249,117,0),24 }, + { IPv4(208,249,206,0),24 }, + { IPv4(208,251,67,0),24 }, + { IPv4(208,251,90,0),23 }, + { IPv4(208,251,159,0),24 }, + { IPv4(208,252,24,0),24 }, + { IPv4(208,252,201,0),24 }, + { IPv4(208,253,72,0),21 }, + { IPv4(208,254,155,0),24 }, + { IPv4(208,255,140,0),24 }, + { IPv4(208,255,152,0),21 }, + { IPv4(208,255,181,0),24 }, + { IPv4(208,255,225,0),24 }, + { IPv4(209,1,23,0),24 }, + { IPv4(209,1,108,0),22 }, + { IPv4(209,1,112,0),24 }, + { IPv4(209,1,113,0),24 }, + { IPv4(209,1,128,0),24 }, + { IPv4(209,2,0,0),16 }, + { IPv4(209,2,36,0),22 }, + { IPv4(209,2,40,0),24 }, + { IPv4(209,2,47,0),24 }, + { IPv4(209,2,48,0),24 }, + { IPv4(209,2,49,0),24 }, + { IPv4(209,2,50,0),24 }, + { IPv4(209,2,51,0),24 }, + { IPv4(209,2,68,0),22 }, + { IPv4(209,2,90,0),24 }, + { IPv4(209,2,92,0),24 }, + { IPv4(209,2,93,0),24 }, + { IPv4(209,2,105,0),24 }, + { IPv4(209,2,125,0),24 }, + { IPv4(209,2,128,0),24 }, + { IPv4(209,2,129,0),24 }, + { IPv4(209,2,130,0),24 }, + { IPv4(209,2,131,0),24 }, + { IPv4(209,2,132,0),24 }, + { IPv4(209,2,133,0),24 }, + { IPv4(209,2,138,0),24 }, + { IPv4(209,2,139,0),24 }, + { IPv4(209,2,143,0),24 }, + { IPv4(209,2,146,0),24 }, + { IPv4(209,2,156,0),24 }, + { IPv4(209,2,160,0),21 }, + { IPv4(209,2,185,0),24 }, + { IPv4(209,2,187,0),24 }, + { IPv4(209,2,208,0),24 }, + { IPv4(209,2,209,0),24 }, + { IPv4(209,2,210,0),24 }, + { IPv4(209,2,211,0),24 }, + { IPv4(209,2,212,0),24 }, + { IPv4(209,2,213,0),24 }, + { IPv4(209,2,216,0),24 }, + { IPv4(209,2,217,0),24 }, + { IPv4(209,2,218,0),24 }, + { IPv4(209,2,219,0),24 }, + { IPv4(209,2,220,0),24 }, + { IPv4(209,2,221,0),24 }, + { IPv4(209,2,222,0),24 }, + { IPv4(209,2,223,0),24 }, + { IPv4(209,2,224,0),24 }, + { 
IPv4(209,2,225,0),24 }, + { IPv4(209,2,226,0),24 }, + { IPv4(209,2,227,0),24 }, + { IPv4(209,2,228,0),24 }, + { IPv4(209,2,229,0),24 }, + { IPv4(209,2,230,0),24 }, + { IPv4(209,2,231,0),24 }, + { IPv4(209,2,232,0),24 }, + { IPv4(209,2,233,0),24 }, + { IPv4(209,2,234,0),24 }, + { IPv4(209,2,235,0),24 }, + { IPv4(209,2,236,0),24 }, + { IPv4(209,2,237,0),24 }, + { IPv4(209,2,238,0),24 }, + { IPv4(209,2,239,0),24 }, + { IPv4(209,2,253,0),24 }, + { IPv4(209,2,254,0),24 }, + { IPv4(209,3,118,0),24 }, + { IPv4(209,3,198,0),24 }, + { IPv4(209,4,228,0),24 }, + { IPv4(209,4,250,0),23 }, + { IPv4(209,4,252,0),23 }, + { IPv4(209,4,254,0),23 }, + { IPv4(209,6,0,0),16 }, + { IPv4(209,6,161,0),24 }, + { IPv4(209,7,0,0),16 }, + { IPv4(209,8,48,0),22 }, + { IPv4(209,8,80,0),23 }, + { IPv4(209,8,192,0),22 }, + { IPv4(209,10,0,0),16 }, + { IPv4(209,10,0,0),19 }, + { IPv4(209,10,14,0),23 }, + { IPv4(209,10,16,0),22 }, + { IPv4(209,10,16,0),24 }, + { IPv4(209,10,24,0),21 }, + { IPv4(209,10,32,0),20 }, + { IPv4(209,10,42,128),25 }, + { IPv4(209,10,48,0),21 }, + { IPv4(209,10,51,0),24 }, + { IPv4(209,10,56,0),21 }, + { IPv4(209,10,64,0),19 }, + { IPv4(209,10,94,0),24 }, + { IPv4(209,10,96,0),19 }, + { IPv4(209,10,123,0),24 }, + { IPv4(209,10,125,0),24 }, + { IPv4(209,10,128,0),20 }, + { IPv4(209,10,128,0),23 }, + { IPv4(209,10,130,0),23 }, + { IPv4(209,10,144,0),21 }, + { IPv4(209,10,146,0),24 }, + { IPv4(209,10,152,0),22 }, + { IPv4(209,10,156,0),22 }, + { IPv4(209,10,160,0),21 }, + { IPv4(209,10,168,0),21 }, + { IPv4(209,10,176,0),20 }, + { IPv4(209,10,180,0),24 }, + { IPv4(209,10,192,0),21 }, + { IPv4(209,10,200,0),22 }, + { IPv4(209,10,204,0),22 }, + { IPv4(209,10,208,0),20 }, + { IPv4(209,10,214,0),24 }, + { IPv4(209,10,224,0),20 }, + { IPv4(209,10,228,128),25 }, + { IPv4(209,10,240,0),20 }, + { IPv4(209,10,244,0),23 }, + { IPv4(209,10,252,0),24 }, + { IPv4(209,11,0,0),22 }, + { IPv4(209,11,0,0),17 }, + { IPv4(209,11,4,0),22 }, + { IPv4(209,11,5,128),25 }, + { IPv4(209,11,8,0),21 }, + { IPv4(209,11,16,0),20 }, + { IPv4(209,11,32,0),19 }, + { IPv4(209,11,56,0),24 }, + { IPv4(209,11,64,0),19 }, + { IPv4(209,11,81,0),24 }, + { IPv4(209,11,96,0),20 }, + { IPv4(209,11,98,0),24 }, + { IPv4(209,11,112,0),20 }, + { IPv4(209,11,121,0),24 }, + { IPv4(209,11,122,0),24 }, + { IPv4(209,11,128,0),19 }, + { IPv4(209,11,135,0),24 }, + { IPv4(209,11,160,0),21 }, + { IPv4(209,11,160,0),19 }, + { IPv4(209,11,176,0),20 }, + { IPv4(209,11,192,0),19 }, + { IPv4(209,11,216,0),21 }, + { IPv4(209,12,0,0),16 }, + { IPv4(209,12,38,0),24 }, + { IPv4(209,12,61,0),24 }, + { IPv4(209,12,62,0),23 }, + { IPv4(209,12,65,0),24 }, + { IPv4(209,12,74,0),24 }, + { IPv4(209,12,75,0),24 }, + { IPv4(209,12,118,0),24 }, + { IPv4(209,12,138,0),23 }, + { IPv4(209,12,140,0),22 }, + { IPv4(209,12,144,0),21 }, + { IPv4(209,12,152,0),24 }, + { IPv4(209,12,183,0),24 }, + { IPv4(209,13,0,0),16 }, + { IPv4(209,14,136,0),24 }, + { IPv4(209,16,128,0),18 }, + { IPv4(209,17,64,0),19 }, + { IPv4(209,17,96,0),24 }, + { IPv4(209,17,192,0),19 }, + { IPv4(209,18,128,0),17 }, + { IPv4(209,19,0,0),17 }, + { IPv4(209,19,4,0),24 }, + { IPv4(209,19,5,0),24 }, + { IPv4(209,19,68,0),24 }, + { IPv4(209,19,75,0),24 }, + { IPv4(209,19,84,0),24 }, + { IPv4(209,19,139,0),24 }, + { IPv4(209,19,192,0),18 }, + { IPv4(209,19,212,0),24 }, + { IPv4(209,20,64,0),19 }, + { IPv4(209,21,0,0),18 }, + { IPv4(209,21,104,0),21 }, + { IPv4(209,21,128,0),17 }, + { IPv4(209,21,136,0),21 }, + { IPv4(209,21,144,0),21 }, + { IPv4(209,22,2,0),24 }, + { IPv4(209,22,6,0),24 }, + { IPv4(209,22,7,0),24 
}, + { IPv4(209,22,8,0),24 }, + { IPv4(209,22,25,0),24 }, + { IPv4(209,22,37,0),24 }, + { IPv4(209,22,47,0),24 }, + { IPv4(209,22,51,0),24 }, + { IPv4(209,22,60,0),24 }, + { IPv4(209,22,153,0),24 }, + { IPv4(209,22,161,0),24 }, + { IPv4(209,22,162,0),24 }, + { IPv4(209,22,181,0),24 }, + { IPv4(209,22,182,0),24 }, + { IPv4(209,22,186,0),23 }, + { IPv4(209,22,212,0),24 }, + { IPv4(209,22,213,0),24 }, + { IPv4(209,22,214,0),24 }, + { IPv4(209,22,215,0),24 }, + { IPv4(209,22,216,0),24 }, + { IPv4(209,22,217,0),24 }, + { IPv4(209,22,218,0),24 }, + { IPv4(209,22,219,0),24 }, + { IPv4(209,23,80,0),20 }, + { IPv4(209,23,82,0),24 }, + { IPv4(209,24,0,0),16 }, + { IPv4(209,25,0,0),23 }, + { IPv4(209,25,24,0),24 }, + { IPv4(209,25,30,0),24 }, + { IPv4(209,25,32,0),23 }, + { IPv4(209,25,34,0),24 }, + { IPv4(209,25,40,0),24 }, + { IPv4(209,25,41,0),24 }, + { IPv4(209,25,42,0),24 }, + { IPv4(209,25,43,0),24 }, + { IPv4(209,25,85,0),24 }, + { IPv4(209,25,86,0),23 }, + { IPv4(209,25,91,0),24 }, + { IPv4(209,25,92,0),24 }, + { IPv4(209,25,92,0),23 }, + { IPv4(209,25,93,0),24 }, + { IPv4(209,25,98,0),23 }, + { IPv4(209,25,100,0),24 }, + { IPv4(209,25,124,0),24 }, + { IPv4(209,25,128,0),18 }, + { IPv4(209,25,192,0),19 }, + { IPv4(209,25,224,0),20 }, + { IPv4(209,25,240,0),20 }, + { IPv4(209,26,32,0),22 }, + { IPv4(209,26,178,0),23 }, + { IPv4(209,26,182,0),24 }, + { IPv4(209,27,3,0),24 }, + { IPv4(209,27,102,0),24 }, + { IPv4(209,27,137,0),24 }, + { IPv4(209,27,197,0),24 }, + { IPv4(209,27,198,0),23 }, + { IPv4(209,27,236,0),22 }, + { IPv4(209,27,240,0),24 }, + { IPv4(209,27,244,0),23 }, + { IPv4(209,27,244,0),22 }, + { IPv4(209,27,246,0),23 }, + { IPv4(209,27,248,0),21 }, + { IPv4(209,28,0,0),16 }, + { IPv4(209,28,6,0),24 }, + { IPv4(209,28,9,0),24 }, + { IPv4(209,28,16,0),24 }, + { IPv4(209,28,34,0),24 }, + { IPv4(209,28,56,0),24 }, + { IPv4(209,28,69,0),24 }, + { IPv4(209,28,71,0),24 }, + { IPv4(209,28,75,0),24 }, + { IPv4(209,28,82,0),24 }, + { IPv4(209,28,174,0),24 }, + { IPv4(209,30,0,0),24 }, + { IPv4(209,31,0,0),16 }, + { IPv4(209,31,80,0),24 }, + { IPv4(209,31,128,0),21 }, + { IPv4(209,32,0,0),16 }, + { IPv4(209,32,92,0),22 }, + { IPv4(209,32,128,0),19 }, + { IPv4(209,32,224,0),22 }, + { IPv4(209,34,0,0),19 }, + { IPv4(209,34,32,0),19 }, + { IPv4(209,35,0,0),16 }, + { IPv4(209,36,0,0),15 }, + { IPv4(209,36,53,0),24 }, + { IPv4(209,36,95,0),24 }, + { IPv4(209,36,112,0),24 }, + { IPv4(209,36,113,0),24 }, + { IPv4(209,36,114,0),24 }, + { IPv4(209,36,115,0),24 }, + { IPv4(209,36,128,0),24 }, + { IPv4(209,37,4,0),24 }, + { IPv4(209,37,80,0),24 }, + { IPv4(209,37,81,0),24 }, + { IPv4(209,37,82,0),24 }, + { IPv4(209,37,83,0),24 }, + { IPv4(209,37,85,0),24 }, + { IPv4(209,37,93,0),24 }, + { IPv4(209,37,138,0),24 }, + { IPv4(209,37,145,0),24 }, + { IPv4(209,39,0,0),16 }, + { IPv4(209,39,118,0),24 }, + { IPv4(209,39,119,0),24 }, + { IPv4(209,40,192,0),21 }, + { IPv4(209,41,0,0),18 }, + { IPv4(209,41,64,0),18 }, + { IPv4(209,41,128,0),20 }, + { IPv4(209,41,128,0),19 }, + { IPv4(209,41,160,0),20 }, + { IPv4(209,41,176,0),21 }, + { IPv4(209,41,192,0),18 }, + { IPv4(209,41,207,0),24 }, + { IPv4(209,41,224,0),24 }, + { IPv4(209,41,244,0),24 }, + { IPv4(209,41,247,0),24 }, + { IPv4(209,42,32,0),20 }, + { IPv4(209,43,128,0),17 }, + { IPv4(209,43,130,0),24 }, + { IPv4(209,43,250,0),24 }, + { IPv4(209,44,14,0),24 }, + { IPv4(209,44,64,0),18 }, + { IPv4(209,44,73,0),24 }, + { IPv4(209,44,99,0),24 }, + { IPv4(209,44,100,0),24 }, + { IPv4(209,44,106,0),24 }, + { IPv4(209,44,107,0),24 }, + { IPv4(209,44,108,0),24 }, + { 
IPv4(209,44,109,0),24 }, + { IPv4(209,44,119,0),24 }, + { IPv4(209,44,124,0),24 }, + { IPv4(209,45,128,0),24 }, + { IPv4(209,45,129,0),24 }, + { IPv4(209,45,130,0),24 }, + { IPv4(209,45,200,0),23 }, + { IPv4(209,45,202,0),23 }, + { IPv4(209,46,0,0),17 }, + { IPv4(209,46,128,0),17 }, + { IPv4(209,46,129,0),24 }, + { IPv4(209,46,140,0),24 }, + { IPv4(209,46,141,0),24 }, + { IPv4(209,46,146,0),24 }, + { IPv4(209,46,147,0),24 }, + { IPv4(209,47,88,0),24 }, + { IPv4(209,47,172,0),24 }, + { IPv4(209,47,192,0),24 }, + { IPv4(209,48,11,0),24 }, + { IPv4(209,49,80,0),21 }, + { IPv4(209,49,88,0),22 }, + { IPv4(209,49,100,0),23 }, + { IPv4(209,49,168,0),24 }, + { IPv4(209,49,172,0),22 }, + { IPv4(209,50,35,0),24 }, + { IPv4(209,50,37,0),24 }, + { IPv4(209,50,44,0),24 }, + { IPv4(209,50,45,0),24 }, + { IPv4(209,50,46,0),24 }, + { IPv4(209,50,128,0),19 }, + { IPv4(209,50,192,0),19 }, + { IPv4(209,50,224,0),19 }, + { IPv4(209,51,0,0),19 }, + { IPv4(209,51,48,0),20 }, + { IPv4(209,51,128,0),19 }, + { IPv4(209,51,160,0),19 }, + { IPv4(209,51,224,0),19 }, + { IPv4(209,51,227,0),24 }, + { IPv4(209,51,228,0),24 }, + { IPv4(209,51,239,0),24 }, + { IPv4(209,51,240,0),24 }, + { IPv4(209,51,255,0),24 }, + { IPv4(209,54,28,0),22 }, + { IPv4(209,54,32,0),22 }, + { IPv4(209,54,36,0),22 }, + { IPv4(209,54,42,0),24 }, + { IPv4(209,54,53,0),24 }, + { IPv4(209,54,72,0),21 }, + { IPv4(209,54,93,0),24 }, + { IPv4(209,54,111,0),24 }, + { IPv4(209,54,123,0),24 }, + { IPv4(209,54,192,0),20 }, + { IPv4(209,54,196,0),24 }, + { IPv4(209,55,64,0),18 }, + { IPv4(209,55,128,0),24 }, + { IPv4(209,55,255,0),24 }, + { IPv4(209,56,0,0),16 }, + { IPv4(209,56,96,0),21 }, + { IPv4(209,57,0,0),16 }, + { IPv4(209,57,144,0),21 }, + { IPv4(209,58,60,0),24 }, + { IPv4(209,58,61,0),24 }, + { IPv4(209,58,68,0),23 }, + { IPv4(209,58,76,0),24 }, + { IPv4(209,58,84,0),24 }, + { IPv4(209,58,140,0),24 }, + { IPv4(209,58,224,0),20 }, + { IPv4(209,60,0,0),16 }, + { IPv4(209,60,1,0),24 }, + { IPv4(209,60,14,0),24 }, + { IPv4(209,60,15,0),24 }, + { IPv4(209,60,16,0),23 }, + { IPv4(209,60,34,0),24 }, + { IPv4(209,60,70,0),24 }, + { IPv4(209,60,71,0),24 }, + { IPv4(209,60,72,0),23 }, + { IPv4(209,60,77,0),24 }, + { IPv4(209,60,88,0),24 }, + { IPv4(209,60,90,0),24 }, + { IPv4(209,60,140,0),23 }, + { IPv4(209,60,142,0),24 }, + { IPv4(209,60,160,0),24 }, + { IPv4(209,60,164,0),24 }, + { IPv4(209,60,167,0),24 }, + { IPv4(209,60,170,0),24 }, + { IPv4(209,60,223,0),24 }, + { IPv4(209,60,242,0),24 }, + { IPv4(209,60,252,0),23 }, + { IPv4(209,60,254,0),24 }, + { IPv4(209,61,85,0),24 }, + { IPv4(209,61,86,0),24 }, + { IPv4(209,61,128,0),18 }, + { IPv4(209,61,192,0),19 }, + { IPv4(209,61,224,0),20 }, + { IPv4(209,62,30,0),23 }, + { IPv4(209,62,32,0),23 }, + { IPv4(209,62,35,0),24 }, + { IPv4(209,62,36,0),22 }, + { IPv4(209,62,40,0),22 }, + { IPv4(209,62,44,0),24 }, + { IPv4(209,62,45,0),24 }, + { IPv4(209,64,0,0),15 }, + { IPv4(209,64,11,0),24 }, + { IPv4(209,64,25,0),24 }, + { IPv4(209,64,139,0),24 }, + { IPv4(209,64,142,0),24 }, + { IPv4(209,64,152,0),22 }, + { IPv4(209,64,156,0),23 }, + { IPv4(209,64,181,0),24 }, + { IPv4(209,64,182,0),24 }, + { IPv4(209,64,202,0),24 }, + { IPv4(209,65,16,0),24 }, + { IPv4(209,65,17,0),24 }, + { IPv4(209,65,18,0),24 }, + { IPv4(209,65,19,0),24 }, + { IPv4(209,65,36,0),22 }, + { IPv4(209,66,0,0),19 }, + { IPv4(209,66,64,0),18 }, + { IPv4(209,66,100,0),23 }, + { IPv4(209,67,18,0),24 }, + { IPv4(209,67,42,0),24 }, + { IPv4(209,67,48,0),22 }, + { IPv4(209,67,152,0),24 }, + { IPv4(209,68,0,0),18 }, + { IPv4(209,68,128,0),19 }, + { 
IPv4(209,68,192,0),18 }, + { IPv4(209,69,0,0),16 }, + { IPv4(209,70,0,0),16 }, + { IPv4(209,70,175,0),24 }, + { IPv4(209,72,0,0),24 }, + { IPv4(209,72,0,0),16 }, + { IPv4(209,72,132,0),24 }, + { IPv4(209,72,133,0),24 }, + { IPv4(209,72,134,0),24 }, + { IPv4(209,72,135,0),24 }, + { IPv4(209,72,136,0),24 }, + { IPv4(209,72,137,0),24 }, + { IPv4(209,72,138,0),24 }, + { IPv4(209,72,139,0),24 }, + { IPv4(209,72,140,0),24 }, + { IPv4(209,72,141,0),24 }, + { IPv4(209,72,142,0),24 }, + { IPv4(209,72,143,0),24 }, + { IPv4(209,72,144,0),24 }, + { IPv4(209,72,145,0),24 }, + { IPv4(209,72,149,0),24 }, + { IPv4(209,72,150,0),24 }, + { IPv4(209,72,151,0),24 }, + { IPv4(209,72,152,0),24 }, + { IPv4(209,72,154,0),24 }, + { IPv4(209,72,155,0),24 }, + { IPv4(209,72,156,0),24 }, + { IPv4(209,72,157,0),24 }, + { IPv4(209,72,158,0),24 }, + { IPv4(209,72,159,0),24 }, + { IPv4(209,72,160,0),24 }, + { IPv4(209,72,161,0),24 }, + { IPv4(209,72,162,0),24 }, + { IPv4(209,72,163,0),24 }, + { IPv4(209,72,164,0),24 }, + { IPv4(209,72,165,0),24 }, + { IPv4(209,73,0,0),18 }, + { IPv4(209,73,40,0),24 }, + { IPv4(209,73,64,0),18 }, + { IPv4(209,73,192,0),18 }, + { IPv4(209,74,0,0),18 }, + { IPv4(209,74,96,0),19 }, + { IPv4(209,74,128,0),18 }, + { IPv4(209,74,148,0),24 }, + { IPv4(209,74,155,0),24 }, + { IPv4(209,74,224,0),20 }, + { IPv4(209,75,0,0),16 }, + { IPv4(209,75,4,0),22 }, + { IPv4(209,75,112,0),21 }, + { IPv4(209,78,0,0),19 }, + { IPv4(209,79,64,0),19 }, + { IPv4(209,80,64,0),24 }, + { IPv4(209,80,65,0),24 }, + { IPv4(209,80,66,0),23 }, + { IPv4(209,80,68,0),24 }, + { IPv4(209,80,72,0),24 }, + { IPv4(209,80,76,0),24 }, + { IPv4(209,80,80,0),24 }, + { IPv4(209,80,88,0),24 }, + { IPv4(209,80,116,0),24 }, + { IPv4(209,80,118,0),24 }, + { IPv4(209,80,120,0),24 }, + { IPv4(209,80,122,0),24 }, + { IPv4(209,81,0,0),18 }, + { IPv4(209,81,55,0),24 }, + { IPv4(209,81,56,0),24 }, + { IPv4(209,81,57,0),24 }, + { IPv4(209,81,58,0),24 }, + { IPv4(209,81,59,0),24 }, + { IPv4(209,81,60,0),24 }, + { IPv4(209,81,61,0),24 }, + { IPv4(209,81,64,0),19 }, + { IPv4(209,81,69,0),24 }, + { IPv4(209,81,139,0),24 }, + { IPv4(209,81,164,0),22 }, + { IPv4(209,81,204,0),22 }, + { IPv4(209,81,216,0),22 }, + { IPv4(209,82,0,0),17 }, + { IPv4(209,83,1,0),24 }, + { IPv4(209,83,16,0),23 }, + { IPv4(209,83,165,0),24 }, + { IPv4(209,83,168,0),23 }, + { IPv4(209,84,64,0),21 }, + { IPv4(209,84,182,0),24 }, + { IPv4(209,84,183,0),24 }, + { IPv4(209,85,0,0),16 }, + { IPv4(209,86,0,0),21 }, + { IPv4(209,86,0,0),16 }, + { IPv4(209,86,8,0),22 }, + { IPv4(209,86,32,0),19 }, + { IPv4(209,87,128,0),20 }, + { IPv4(209,87,144,0),20 }, + { IPv4(209,87,192,0),20 }, + { IPv4(209,88,112,0),24 }, + { IPv4(209,88,113,0),24 }, + { IPv4(209,88,114,0),24 }, + { IPv4(209,88,164,0),22 }, + { IPv4(209,88,165,0),24 }, + { IPv4(209,90,32,0),22 }, + { IPv4(209,90,36,0),24 }, + { IPv4(209,90,38,0),24 }, + { IPv4(209,90,39,0),24 }, + { IPv4(209,90,128,0),18 }, + { IPv4(209,90,192,0),18 }, + { IPv4(209,91,128,0),18 }, + { IPv4(209,91,141,0),24 }, + { IPv4(209,91,147,0),24 }, + { IPv4(209,91,148,0),24 }, + { IPv4(209,91,149,0),24 }, + { IPv4(209,91,155,0),24 }, + { IPv4(209,91,156,0),24 }, + { IPv4(209,91,158,0),24 }, + { IPv4(209,91,167,0),24 }, + { IPv4(209,93,0,0),17 }, + { IPv4(209,93,4,0),24 }, + { IPv4(209,93,12,0),24 }, + { IPv4(209,93,15,0),24 }, + { IPv4(209,93,21,0),24 }, + { IPv4(209,93,22,0),24 }, + { IPv4(209,93,24,0),24 }, + { IPv4(209,93,31,0),24 }, + { IPv4(209,93,45,0),24 }, + { IPv4(209,93,46,0),24 }, + { IPv4(209,93,63,0),24 }, + { IPv4(209,93,64,0),24 }, + { 
IPv4(209,93,72,0),24 }, + { IPv4(209,93,102,0),24 }, + { IPv4(209,93,128,0),18 }, + { IPv4(209,93,128,0),24 }, + { IPv4(209,93,133,0),24 }, + { IPv4(209,93,179,0),24 }, + { IPv4(209,93,185,0),24 }, + { IPv4(209,93,192,0),19 }, + { IPv4(209,93,192,0),24 }, + { IPv4(209,93,195,0),24 }, + { IPv4(209,93,196,0),24 }, + { IPv4(209,93,201,0),24 }, + { IPv4(209,93,206,0),24 }, + { IPv4(209,93,216,0),24 }, + { IPv4(209,93,224,0),19 }, + { IPv4(209,93,226,0),24 }, + { IPv4(209,93,235,0),24 }, + { IPv4(209,93,237,0),24 }, + { IPv4(209,93,238,0),24 }, + { IPv4(209,93,254,0),24 }, + { IPv4(209,94,0,0),19 }, + { IPv4(209,94,96,0),19 }, + { IPv4(209,94,128,0),19 }, + { IPv4(209,94,211,0),24 }, + { IPv4(209,95,0,0),19 }, + { IPv4(209,95,32,0),19 }, + { IPv4(209,95,95,0),24 }, + { IPv4(209,98,0,0),16 }, + { IPv4(209,98,16,0),20 }, + { IPv4(209,98,70,0),24 }, + { IPv4(209,98,89,0),24 }, + { IPv4(209,98,97,0),24 }, + { IPv4(209,98,132,0),22 }, + { IPv4(209,98,164,0),24 }, + { IPv4(209,99,0,0),17 }, + { IPv4(209,99,232,0),23 }, + { IPv4(209,100,32,0),21 }, + { IPv4(209,100,32,0),24 }, + { IPv4(209,100,33,0),24 }, + { IPv4(209,100,35,0),24 }, + { IPv4(209,100,36,0),24 }, + { IPv4(209,100,37,0),24 }, + { IPv4(209,100,38,0),24 }, + { IPv4(209,100,39,0),24 }, + { IPv4(209,100,42,0),24 }, + { IPv4(209,100,104,0),21 }, + { IPv4(209,100,120,0),21 }, + { IPv4(209,100,156,0),22 }, + { IPv4(209,101,20,0),24 }, + { IPv4(209,101,24,0),24 }, + { IPv4(209,101,32,0),24 }, + { IPv4(209,101,39,0),24 }, + { IPv4(209,101,40,0),24 }, + { IPv4(209,101,64,0),21 }, + { IPv4(209,101,254,0),24 }, + { IPv4(209,102,21,0),24 }, + { IPv4(209,102,23,0),24 }, + { IPv4(209,102,28,0),24 }, + { IPv4(209,102,72,0),22 }, + { IPv4(209,102,76,0),23 }, + { IPv4(209,102,92,0),22 }, + { IPv4(209,102,96,0),22 }, + { IPv4(209,102,192,0),19 }, + { IPv4(209,103,128,0),19 }, + { IPv4(209,104,32,0),24 }, + { IPv4(209,104,33,0),24 }, + { IPv4(209,104,34,0),23 }, + { IPv4(209,104,36,0),22 }, + { IPv4(209,104,42,0),23 }, + { IPv4(209,104,44,0),24 }, + { IPv4(209,104,61,0),24 }, + { IPv4(209,104,62,0),24 }, + { IPv4(209,104,63,0),24 }, + { IPv4(209,105,0,0),17 }, + { IPv4(209,106,0,0),17 }, + { IPv4(209,106,128,0),18 }, + { IPv4(209,106,192,0),19 }, + { IPv4(209,106,224,0),19 }, + { IPv4(209,107,0,0),18 }, + { IPv4(209,107,31,0),24 }, + { IPv4(209,107,64,0),19 }, + { IPv4(209,107,128,0),18 }, + { IPv4(209,108,0,0),15 }, + { IPv4(209,108,0,0),14 }, + { IPv4(209,108,96,0),20 }, + { IPv4(209,109,8,0),22 }, + { IPv4(209,109,28,0),22 }, + { IPv4(209,109,56,0),21 }, + { IPv4(209,109,59,0),24 }, + { IPv4(209,109,130,0),23 }, + { IPv4(209,109,133,0),24 }, + { IPv4(209,109,134,0),24 }, + { IPv4(209,109,140,0),23 }, + { IPv4(209,109,144,0),23 }, + { IPv4(209,109,150,0),24 }, + { IPv4(209,109,224,0),21 }, + { IPv4(209,110,97,0),24 }, + { IPv4(209,111,0,0),24 }, + { IPv4(209,111,5,0),24 }, + { IPv4(209,111,6,0),23 }, + { IPv4(209,111,6,0),24 }, + { IPv4(209,111,216,0),24 }, + { IPv4(209,111,217,0),24 }, + { IPv4(209,111,218,0),24 }, + { IPv4(209,111,219,0),24 }, + { IPv4(209,111,220,0),24 }, + { IPv4(209,112,0,0),18 }, + { IPv4(209,112,96,0),20 }, + { IPv4(209,112,128,0),18 }, + { IPv4(209,112,192,0),19 }, + { IPv4(209,113,128,0),17 }, + { IPv4(209,113,170,0),24 }, + { IPv4(209,114,0,0),18 }, + { IPv4(209,114,128,0),18 }, + { IPv4(209,114,189,0),24 }, + { IPv4(209,115,0,0),17 }, + { IPv4(209,115,25,0),24 }, + { IPv4(209,115,29,0),24 }, + { IPv4(209,115,38,0),24 }, + { IPv4(209,115,39,0),24 }, + { IPv4(209,115,53,0),24 }, + { IPv4(209,115,94,0),24 }, + { 
IPv4(209,115,120,0),22 }, + { IPv4(209,116,0,0),21 }, + { IPv4(209,116,118,0),24 }, + { IPv4(209,116,172,0),24 }, + { IPv4(209,117,106,0),24 }, + { IPv4(209,117,122,0),24 }, + { IPv4(209,117,156,0),24 }, + { IPv4(209,117,158,0),24 }, + { IPv4(209,117,200,0),22 }, + { IPv4(209,117,204,0),23 }, + { IPv4(209,117,206,0),23 }, + { IPv4(209,117,208,0),21 }, + { IPv4(209,118,28,0),24 }, + { IPv4(209,118,74,0),24 }, + { IPv4(209,118,138,0),23 }, + { IPv4(209,118,182,0),24 }, + { IPv4(209,118,183,0),24 }, + { IPv4(209,118,231,0),24 }, + { IPv4(209,118,248,0),22 }, + { IPv4(209,119,36,0),23 }, + { IPv4(209,119,93,0),24 }, + { IPv4(209,119,196,0),22 }, + { IPv4(209,119,226,0),23 }, + { IPv4(209,119,228,0),22 }, + { IPv4(209,122,0,0),16 }, + { IPv4(209,123,0,0),16 }, + { IPv4(209,123,45,0),24 }, + { IPv4(209,123,72,0),24 }, + { IPv4(209,123,73,0),24 }, + { IPv4(209,123,74,0),24 }, + { IPv4(209,123,75,0),24 }, + { IPv4(209,123,190,0),23 }, + { IPv4(209,123,219,0),24 }, + { IPv4(209,124,0,0),19 }, + { IPv4(209,124,64,0),19 }, + { IPv4(209,124,96,0),20 }, + { IPv4(209,124,128,0),19 }, + { IPv4(209,124,192,0),19 }, + { IPv4(209,124,224,0),19 }, + { IPv4(209,125,0,0),16 }, + { IPv4(209,125,17,0),24 }, + { IPv4(209,125,47,0),24 }, + { IPv4(209,125,49,0),24 }, + { IPv4(209,125,62,0),24 }, + { IPv4(209,125,93,0),24 }, + { IPv4(209,125,149,0),24 }, + { IPv4(209,126,128,0),19 }, + { IPv4(209,126,160,0),20 }, + { IPv4(209,126,176,0),20 }, + { IPv4(209,128,64,0),19 }, + { IPv4(209,128,96,0),19 }, + { IPv4(209,128,192,0),19 }, + { IPv4(209,129,0,0),16 }, + { IPv4(209,129,40,0),22 }, + { IPv4(209,129,44,0),23 }, + { IPv4(209,130,0,0),17 }, + { IPv4(209,130,152,0),24 }, + { IPv4(209,130,153,0),24 }, + { IPv4(209,130,154,0),24 }, + { IPv4(209,130,155,0),24 }, + { IPv4(209,130,156,0),24 }, + { IPv4(209,130,157,0),24 }, + { IPv4(209,130,158,0),24 }, + { IPv4(209,130,159,0),24 }, + { IPv4(209,131,96,0),22 }, + { IPv4(209,131,96,0),20 }, + { IPv4(209,131,100,0),22 }, + { IPv4(209,132,0,0),17 }, + { IPv4(209,132,207,0),24 }, + { IPv4(209,132,212,0),24 }, + { IPv4(209,132,213,0),24 }, + { IPv4(209,132,214,0),24 }, + { IPv4(209,133,0,0),17 }, + { IPv4(209,133,21,0),24 }, + { IPv4(209,133,28,0),23 }, + { IPv4(209,133,38,0),24 }, + { IPv4(209,133,50,0),24 }, + { IPv4(209,133,93,0),24 }, + { IPv4(209,133,117,0),24 }, + { IPv4(209,133,128,0),18 }, + { IPv4(209,134,128,0),19 }, + { IPv4(209,134,160,0),19 }, + { IPv4(209,135,192,0),18 }, + { IPv4(209,136,0,0),16 }, + { IPv4(209,136,21,0),24 }, + { IPv4(209,136,22,0),24 }, + { IPv4(209,136,26,0),24 }, + { IPv4(209,136,27,0),24 }, + { IPv4(209,136,28,0),24 }, + { IPv4(209,136,29,0),24 }, + { IPv4(209,136,30,0),24 }, + { IPv4(209,136,31,0),24 }, + { IPv4(209,136,32,0),24 }, + { IPv4(209,136,33,0),24 }, + { IPv4(209,136,34,0),24 }, + { IPv4(209,136,35,0),24 }, + { IPv4(209,136,36,0),24 }, + { IPv4(209,136,64,0),24 }, + { IPv4(209,136,70,0),24 }, + { IPv4(209,136,72,0),24 }, + { IPv4(209,136,81,0),24 }, + { IPv4(209,136,82,0),24 }, + { IPv4(209,136,164,0),22 }, + { IPv4(209,136,168,0),23 }, + { IPv4(209,136,170,0),24 }, + { IPv4(209,136,249,0),24 }, + { IPv4(209,136,250,0),24 }, + { IPv4(209,136,251,0),24 }, + { IPv4(209,136,252,0),24 }, + { IPv4(209,137,128,0),19 }, + { IPv4(209,137,136,0),21 }, + { IPv4(209,137,144,0),21 }, + { IPv4(209,137,152,0),21 }, + { IPv4(209,137,160,0),20 }, + { IPv4(209,137,192,0),19 }, + { IPv4(209,138,65,0),24 }, + { IPv4(209,139,0,0),17 }, + { IPv4(209,139,128,0),18 }, + { IPv4(209,140,168,0),21 }, + { IPv4(209,140,192,0),19 }, + { IPv4(209,141,4,0),24 
}, + { IPv4(209,141,26,0),24 }, + { IPv4(209,141,28,0),24 }, + { IPv4(209,141,66,0),24 }, + { IPv4(209,141,67,0),24 }, + { IPv4(209,141,72,0),21 }, + { IPv4(209,141,104,0),24 }, + { IPv4(209,141,112,0),21 }, + { IPv4(209,141,123,0),24 }, + { IPv4(209,141,180,0),22 }, + { IPv4(209,141,184,0),24 }, + { IPv4(209,141,228,0),23 }, + { IPv4(209,141,241,0),24 }, + { IPv4(209,143,0,0),18 }, + { IPv4(209,144,20,0),23 }, + { IPv4(209,144,52,0),24 }, + { IPv4(209,144,54,0),24 }, + { IPv4(209,144,55,0),24 }, + { IPv4(209,144,136,0),23 }, + { IPv4(209,144,210,0),24 }, + { IPv4(209,144,211,0),24 }, + { IPv4(209,144,219,0),24 }, + { IPv4(209,146,63,0),24 }, + { IPv4(209,146,128,0),20 }, + { IPv4(209,146,128,0),17 }, + { IPv4(209,146,128,0),18 }, + { IPv4(209,146,144,0),21 }, + { IPv4(209,146,147,0),24 }, + { IPv4(209,146,152,0),22 }, + { IPv4(209,146,155,0),24 }, + { IPv4(209,146,156,0),24 }, + { IPv4(209,146,160,0),20 }, + { IPv4(209,146,164,0),24 }, + { IPv4(209,146,171,0),24 }, + { IPv4(209,146,172,0),24 }, + { IPv4(209,146,173,0),24 }, + { IPv4(209,146,176,0),22 }, + { IPv4(209,146,178,0),23 }, + { IPv4(209,146,182,0),24 }, + { IPv4(209,146,184,0),21 }, + { IPv4(209,146,188,0),24 }, + { IPv4(209,146,192,0),19 }, + { IPv4(209,146,203,0),24 }, + { IPv4(209,146,224,0),20 }, + { IPv4(209,146,230,0),24 }, + { IPv4(209,146,231,0),24 }, + { IPv4(209,146,240,0),21 }, + { IPv4(209,146,244,0),24 }, + { IPv4(209,146,248,0),24 }, + { IPv4(209,146,249,0),24 }, + { IPv4(209,146,250,0),24 }, + { IPv4(209,146,251,0),24 }, + { IPv4(209,146,252,0),24 }, + { IPv4(209,146,253,0),24 }, + { IPv4(209,147,0,0),18 }, + { IPv4(209,147,64,0),19 }, + { IPv4(209,147,128,0),18 }, + { IPv4(209,149,164,0),23 }, + { IPv4(209,150,32,0),19 }, + { IPv4(209,150,88,0),22 }, + { IPv4(209,150,160,0),19 }, + { IPv4(209,151,0,0),19 }, + { IPv4(209,151,32,0),19 }, + { IPv4(209,151,128,0),20 }, + { IPv4(209,151,192,0),22 }, + { IPv4(209,151,196,0),22 }, + { IPv4(209,151,200,0),22 }, + { IPv4(209,151,204,0),22 }, + { IPv4(209,151,208,0),22 }, + { IPv4(209,151,212,0),22 }, + { IPv4(209,151,224,0),19 }, + { IPv4(209,152,64,0),18 }, + { IPv4(209,152,192,0),19 }, + { IPv4(209,153,192,0),18 }, + { IPv4(209,153,205,0),24 }, + { IPv4(209,154,100,0),24 }, + { IPv4(209,155,0,0),16 }, + { IPv4(209,155,25,0),24 }, + { IPv4(209,155,26,0),23 }, + { IPv4(209,155,28,0),22 }, + { IPv4(209,155,42,0),24 }, + { IPv4(209,155,43,0),24 }, + { IPv4(209,155,59,0),24 }, + { IPv4(209,155,75,0),24 }, + { IPv4(209,155,76,0),24 }, + { IPv4(209,155,88,0),24 }, + { IPv4(209,155,110,0),24 }, + { IPv4(209,155,118,0),24 }, + { IPv4(209,155,124,0),24 }, + { IPv4(209,155,125,0),24 }, + { IPv4(209,155,144,0),24 }, + { IPv4(209,155,145,0),24 }, + { IPv4(209,155,146,0),24 }, + { IPv4(209,155,147,0),24 }, + { IPv4(209,155,156,0),24 }, + { IPv4(209,155,162,0),23 }, + { IPv4(209,155,168,0),22 }, + { IPv4(209,155,185,0),24 }, + { IPv4(209,155,192,0),23 }, + { IPv4(209,155,198,0),24 }, + { IPv4(209,155,199,0),24 }, + { IPv4(209,155,204,0),22 }, + { IPv4(209,155,224,0),21 }, + { IPv4(209,155,238,0),24 }, + { IPv4(209,157,0,0),16 }, + { IPv4(209,160,82,0),24 }, + { IPv4(209,161,0,0),18 }, + { IPv4(209,161,32,0),19 }, + { IPv4(209,161,64,0),19 }, + { IPv4(209,161,96,0),20 }, + { IPv4(209,162,64,0),18 }, + { IPv4(209,162,128,0),19 }, + { IPv4(209,162,202,0),24 }, + { IPv4(209,163,0,0),18 }, + { IPv4(209,163,232,0),21 }, + { IPv4(209,163,244,0),22 }, + { IPv4(209,163,248,0),22 }, + { IPv4(209,164,0,0),18 }, + { IPv4(209,164,128,0),18 }, + { IPv4(209,165,192,0),19 }, + { IPv4(209,165,224,0),22 
}, + { IPv4(209,165,230,0),23 }, + { IPv4(209,165,236,0),24 }, + { IPv4(209,166,128,0),18 }, + { IPv4(209,168,0,0),17 }, + { IPv4(209,168,7,0),24 }, + { IPv4(209,168,8,0),24 }, + { IPv4(209,168,63,0),24 }, + { IPv4(209,170,0,0),18 }, + { IPv4(209,170,192,0),19 }, + { IPv4(209,170,224,0),19 }, + { IPv4(209,172,64,0),18 }, + { IPv4(209,172,224,0),19 }, + { IPv4(209,173,0,0),19 }, + { IPv4(209,173,32,0),24 }, + { IPv4(209,173,57,0),24 }, + { IPv4(209,173,58,0),24 }, + { IPv4(209,173,64,0),20 }, + { IPv4(209,173,128,0),19 }, + { IPv4(209,173,160,0),19 }, + { IPv4(209,174,0,0),16 }, + { IPv4(209,175,0,0),16 }, + { IPv4(209,175,208,0),21 }, + { IPv4(209,176,0,0),22 }, + { IPv4(209,176,16,0),22 }, + { IPv4(209,176,198,0),24 }, + { IPv4(209,176,248,0),22 }, + { IPv4(209,177,0,0),18 }, + { IPv4(209,177,6,0),23 }, + { IPv4(209,177,22,0),23 }, + { IPv4(209,177,29,0),24 }, + { IPv4(209,177,41,0),24 }, + { IPv4(209,177,42,0),24 }, + { IPv4(209,177,43,0),24 }, + { IPv4(209,177,44,0),24 }, + { IPv4(209,177,58,0),24 }, + { IPv4(209,177,64,0),19 }, + { IPv4(209,177,94,0),24 }, + { IPv4(209,177,192,0),24 }, + { IPv4(209,177,192,0),18 }, + { IPv4(209,177,192,0),19 }, + { IPv4(209,177,193,0),24 }, + { IPv4(209,177,194,0),23 }, + { IPv4(209,177,196,0),24 }, + { IPv4(209,177,197,0),24 }, + { IPv4(209,177,198,0),24 }, + { IPv4(209,177,199,0),24 }, + { IPv4(209,177,200,0),24 }, + { IPv4(209,177,201,0),24 }, + { IPv4(209,177,202,0),24 }, + { IPv4(209,177,203,0),24 }, + { IPv4(209,177,204,0),24 }, + { IPv4(209,177,205,0),24 }, + { IPv4(209,177,206,0),24 }, + { IPv4(209,177,207,0),24 }, + { IPv4(209,177,208,0),24 }, + { IPv4(209,177,209,0),24 }, + { IPv4(209,177,210,0),24 }, + { IPv4(209,177,212,0),24 }, + { IPv4(209,177,213,0),24 }, + { IPv4(209,177,214,0),24 }, + { IPv4(209,177,216,0),22 }, + { IPv4(209,177,220,0),23 }, + { IPv4(209,177,222,0),24 }, + { IPv4(209,177,223,0),24 }, + { IPv4(209,178,0,0),17 }, + { IPv4(209,178,128,0),18 }, + { IPv4(209,178,213,0),24 }, + { IPv4(209,179,0,0),16 }, + { IPv4(209,180,0,0),15 }, + { IPv4(209,180,28,0),24 }, + { IPv4(209,180,220,0),22 }, + { IPv4(209,182,192,0),21 }, + { IPv4(209,182,200,0),21 }, + { IPv4(209,182,208,0),21 }, + { IPv4(209,182,248,0),21 }, + { IPv4(209,183,0,0),19 }, + { IPv4(209,183,48,0),21 }, + { IPv4(209,183,192,0),18 }, + { IPv4(209,185,128,0),24 }, + { IPv4(209,185,129,0),24 }, + { IPv4(209,185,130,0),23 }, + { IPv4(209,185,149,0),24 }, + { IPv4(209,185,240,0),22 }, + { IPv4(209,186,0,0),15 }, + { IPv4(209,186,0,0),24 }, + { IPv4(209,186,12,0),24 }, + { IPv4(209,186,13,0),24 }, + { IPv4(209,186,14,0),24 }, + { IPv4(209,186,15,0),24 }, + { IPv4(209,186,19,0),24 }, + { IPv4(209,186,58,0),23 }, + { IPv4(209,186,64,0),24 }, + { IPv4(209,186,80,0),22 }, + { IPv4(209,186,84,0),23 }, + { IPv4(209,186,103,0),24 }, + { IPv4(209,186,118,0),24 }, + { IPv4(209,186,132,0),23 }, + { IPv4(209,186,142,0),24 }, + { IPv4(209,186,148,0),24 }, + { IPv4(209,186,149,0),24 }, + { IPv4(209,186,150,0),24 }, + { IPv4(209,186,151,0),24 }, + { IPv4(209,186,186,0),24 }, + { IPv4(209,186,187,0),24 }, + { IPv4(209,186,188,0),24 }, + { IPv4(209,186,189,0),24 }, + { IPv4(209,186,190,0),23 }, + { IPv4(209,186,197,0),24 }, + { IPv4(209,186,240,0),21 }, + { IPv4(209,186,248,0),22 }, + { IPv4(209,187,22,0),24 }, + { IPv4(209,187,49,0),24 }, + { IPv4(209,187,50,0),23 }, + { IPv4(209,187,76,0),23 }, + { IPv4(209,187,78,0),23 }, + { IPv4(209,187,89,0),24 }, + { IPv4(209,187,90,0),24 }, + { IPv4(209,187,112,0),21 }, + { IPv4(209,187,137,0),24 }, + { IPv4(209,187,140,0),24 }, + { 
IPv4(209,187,141,0),24 }, + { IPv4(209,187,142,0),24 }, + { IPv4(209,187,143,0),24 }, + { IPv4(209,187,160,0),24 }, + { IPv4(209,187,161,0),24 }, + { IPv4(209,187,162,0),24 }, + { IPv4(209,187,163,0),24 }, + { IPv4(209,187,164,0),24 }, + { IPv4(209,187,165,0),24 }, + { IPv4(209,187,166,0),24 }, + { IPv4(209,187,167,0),24 }, + { IPv4(209,187,168,0),22 }, + { IPv4(209,187,176,0),20 }, + { IPv4(209,187,200,0),22 }, + { IPv4(209,187,207,0),24 }, + { IPv4(209,187,208,0),21 }, + { IPv4(209,187,216,0),24 }, + { IPv4(209,187,217,0),24 }, + { IPv4(209,187,218,0),24 }, + { IPv4(209,187,219,0),24 }, + { IPv4(209,187,220,0),24 }, + { IPv4(209,187,221,0),24 }, + { IPv4(209,187,224,0),19 }, + { IPv4(209,189,0,0),17 }, + { IPv4(209,189,128,0),18 }, + { IPv4(209,190,128,0),19 }, + { IPv4(209,190,160,0),19 }, + { IPv4(209,190,192,0),18 }, + { IPv4(209,191,78,0),24 }, + { IPv4(209,191,84,0),24 }, + { IPv4(209,191,103,0),24 }, + { IPv4(209,191,116,0),24 }, + { IPv4(209,191,119,0),24 }, + { IPv4(209,191,128,0),19 }, + { IPv4(209,191,133,0),24 }, + { IPv4(209,191,136,0),24 }, + { IPv4(209,191,138,0),24 }, + { IPv4(209,191,139,0),24 }, + { IPv4(209,191,142,0),24 }, + { IPv4(209,191,150,0),23 }, + { IPv4(209,191,152,0),24 }, + { IPv4(209,191,153,0),24 }, + { IPv4(209,191,155,0),24 }, + { IPv4(209,191,160,0),19 }, + { IPv4(209,191,164,0),23 }, + { IPv4(209,191,167,0),24 }, + { IPv4(209,191,168,0),21 }, + { IPv4(209,191,168,0),24 }, + { IPv4(209,191,172,0),24 }, + { IPv4(209,191,173,0),24 }, + { IPv4(209,191,176,0),22 }, + { IPv4(209,191,180,0),24 }, + { IPv4(209,191,182,0),23 }, + { IPv4(209,191,192,0),19 }, + { IPv4(209,192,20,0),22 }, + { IPv4(209,192,48,0),22 }, + { IPv4(209,192,210,0),24 }, + { IPv4(209,193,0,0),18 }, + { IPv4(209,193,64,0),19 }, + { IPv4(209,193,95,0),24 }, + { IPv4(209,193,96,0),20 }, + { IPv4(209,193,128,0),17 }, + { IPv4(209,194,0,0),16 }, + { IPv4(209,194,53,0),24 }, + { IPv4(209,194,68,0),24 }, + { IPv4(209,194,69,0),24 }, + { IPv4(209,194,70,0),24 }, + { IPv4(209,194,71,0),24 }, + { IPv4(209,194,164,0),24 }, + { IPv4(209,194,165,0),24 }, + { IPv4(209,194,166,0),24 }, + { IPv4(209,194,167,0),24 }, + { IPv4(209,194,173,0),24 }, + { IPv4(209,194,192,0),24 }, + { IPv4(209,194,193,0),24 }, + { IPv4(209,194,194,0),24 }, + { IPv4(209,194,194,0),29 }, + { IPv4(209,194,195,0),24 }, + { IPv4(209,194,196,0),24 }, + { IPv4(209,194,197,0),24 }, + { IPv4(209,194,203,0),24 }, + { IPv4(209,194,212,0),22 }, + { IPv4(209,194,216,0),24 }, + { IPv4(209,195,0,0),19 }, + { IPv4(209,195,32,0),19 }, + { IPv4(209,195,64,0),18 }, + { IPv4(209,195,192,0),19 }, + { IPv4(209,195,192,0),18 }, + { IPv4(209,195,224,0),19 }, + { IPv4(209,196,192,0),24 }, + { IPv4(209,196,192,0),19 }, + { IPv4(209,197,0,0),19 }, + { IPv4(209,197,64,0),18 }, + { IPv4(209,198,64,0),18 }, + { IPv4(209,198,192,0),19 }, + { IPv4(209,198,197,0),24 }, + { IPv4(209,198,198,0),24 }, + { IPv4(209,198,199,0),24 }, + { IPv4(209,198,202,0),23 }, + { IPv4(209,198,206,0),24 }, + { IPv4(209,198,209,0),24 }, + { IPv4(209,198,224,0),20 }, + { IPv4(209,198,225,0),24 }, + { IPv4(209,198,232,0),24 }, + { IPv4(209,198,233,0),24 }, + { IPv4(209,198,235,0),24 }, + { IPv4(209,198,237,0),24 }, + { IPv4(209,198,238,0),24 }, + { IPv4(209,198,239,0),24 }, + { IPv4(209,198,240,0),21 }, + { IPv4(209,198,240,0),23 }, + { IPv4(209,198,243,0),24 }, + { IPv4(209,198,245,0),24 }, + { IPv4(209,198,247,0),24 }, + { IPv4(209,198,248,0),21 }, + { IPv4(209,198,250,0),23 }, + { IPv4(209,198,253,0),24 }, + { IPv4(209,203,0,0),18 }, + { IPv4(209,203,80,0),21 }, + { 
IPv4(209,203,86,0),24 }, + { IPv4(209,203,88,0),21 }, + { IPv4(209,203,92,0),23 }, + { IPv4(209,203,192,0),19 }, + { IPv4(209,203,214,0),24 }, + { IPv4(209,205,71,0),24 }, + { IPv4(209,205,81,0),24 }, + { IPv4(209,205,82,0),24 }, + { IPv4(209,206,168,0),24 }, + { IPv4(209,206,172,0),24 }, + { IPv4(209,206,240,0),22 }, + { IPv4(209,207,128,0),17 }, + { IPv4(209,208,128,0),17 }, + { IPv4(209,208,207,0),24 }, + { IPv4(209,208,210,0),24 }, + { IPv4(209,208,228,0),24 }, + { IPv4(209,208,249,0),24 }, + { IPv4(209,208,250,0),24 }, + { IPv4(209,209,64,0),19 }, + { IPv4(209,209,224,0),19 }, + { IPv4(209,209,248,0),23 }, + { IPv4(209,209,250,0),23 }, + { IPv4(209,210,120,0),21 }, + { IPv4(209,210,228,0),22 }, + { IPv4(209,210,251,0),24 }, + { IPv4(209,211,30,0),23 }, + { IPv4(209,211,107,0),24 }, + { IPv4(209,211,110,0),24 }, + { IPv4(209,211,129,0),24 }, + { IPv4(209,211,143,0),24 }, + { IPv4(209,211,168,0),23 }, + { IPv4(209,211,177,0),24 }, + { IPv4(209,211,188,0),24 }, + { IPv4(209,211,199,0),24 }, + { IPv4(209,211,200,0),24 }, + { IPv4(209,211,201,0),24 }, + { IPv4(209,211,202,0),24 }, + { IPv4(209,211,203,0),24 }, + { IPv4(209,211,204,0),24 }, + { IPv4(209,212,96,0),19 }, + { IPv4(209,212,127,0),24 }, + { IPv4(209,213,32,0),19 }, + { IPv4(209,213,33,0),24 }, + { IPv4(209,213,34,0),24 }, + { IPv4(209,213,43,0),24 }, + { IPv4(209,213,45,0),24 }, + { IPv4(209,213,47,0),24 }, + { IPv4(209,213,51,0),24 }, + { IPv4(209,213,64,0),19 }, + { IPv4(209,213,94,0),23 }, + { IPv4(209,213,96,0),19 }, + { IPv4(209,213,194,0),24 }, + { IPv4(209,213,195,0),24 }, + { IPv4(209,213,198,0),24 }, + { IPv4(209,216,0,0),18 }, + { IPv4(209,216,96,0),19 }, + { IPv4(209,216,192,0),18 }, + { IPv4(209,217,32,0),20 }, + { IPv4(209,217,48,0),21 }, + { IPv4(209,217,64,0),18 }, + { IPv4(209,217,128,0),18 }, + { IPv4(209,217,192,0),19 }, + { IPv4(209,218,0,0),15 }, + { IPv4(209,218,32,0),23 }, + { IPv4(209,218,54,0),24 }, + { IPv4(209,218,64,0),22 }, + { IPv4(209,218,90,0),24 }, + { IPv4(209,218,160,0),22 }, + { IPv4(209,218,201,0),24 }, + { IPv4(209,218,206,0),24 }, + { IPv4(209,219,69,0),24 }, + { IPv4(209,219,188,0),22 }, + { IPv4(209,219,210,0),24 }, + { IPv4(209,219,240,0),24 }, + { IPv4(209,219,241,0),24 }, + { IPv4(209,219,242,0),24 }, + { IPv4(209,219,243,0),24 }, + { IPv4(209,220,0,0),16 }, + { IPv4(209,220,18,0),23 }, + { IPv4(209,220,96,0),24 }, + { IPv4(209,220,118,0),24 }, + { IPv4(209,220,178,0),24 }, + { IPv4(209,220,182,0),23 }, + { IPv4(209,221,136,0),22 }, + { IPv4(209,221,140,0),24 }, + { IPv4(209,221,166,0),23 }, + { IPv4(209,221,192,0),19 }, + { IPv4(209,221,224,0),24 }, + { IPv4(209,221,225,0),24 }, + { IPv4(209,221,226,0),24 }, + { IPv4(209,222,32,0),20 }, + { IPv4(209,222,64,0),18 }, + { IPv4(209,223,100,0),24 }, + { IPv4(209,223,131,0),24 }, + { IPv4(209,223,152,0),23 }, + { IPv4(209,223,183,0),24 }, + { IPv4(209,223,189,0),24 }, + { IPv4(209,223,200,0),21 }, + { IPv4(209,224,160,0),24 }, + { IPv4(209,224,161,0),24 }, + { IPv4(209,224,162,0),24 }, + { IPv4(209,224,163,0),24 }, + { IPv4(209,224,164,0),24 }, + { IPv4(209,224,165,0),24 }, + { IPv4(209,224,166,0),24 }, + { IPv4(209,224,167,0),24 }, + { IPv4(209,224,204,0),22 }, + { IPv4(209,224,208,0),21 }, + { IPv4(209,224,223,0),24 }, + { IPv4(209,225,36,0),22 }, + { IPv4(209,225,49,0),24 }, + { IPv4(209,225,128,0),18 }, + { IPv4(209,227,0,0),17 }, + { IPv4(209,227,18,0),24 }, + { IPv4(209,227,36,0),24 }, + { IPv4(209,227,62,0),24 }, + { IPv4(209,227,128,0),18 }, + { IPv4(209,227,128,0),19 }, + { IPv4(209,227,130,0),23 }, + { IPv4(209,227,132,0),22 }, + { 
IPv4(209,227,136,0),23 }, + { IPv4(209,227,138,0),23 }, + { IPv4(209,227,140,0),23 }, + { IPv4(209,227,144,0),22 }, + { IPv4(209,227,148,0),23 }, + { IPv4(209,227,152,0),23 }, + { IPv4(209,227,154,0),23 }, + { IPv4(209,227,160,0),19 }, + { IPv4(209,227,166,0),23 }, + { IPv4(209,227,188,0),22 }, + { IPv4(209,227,188,0),23 }, + { IPv4(209,227,190,0),23 }, + { IPv4(209,227,192,0),18 }, + { IPv4(209,228,22,0),24 }, + { IPv4(209,228,176,0),20 }, + { IPv4(209,229,80,0),20 }, + { IPv4(209,232,144,0),20 }, + { IPv4(209,233,156,0),22 }, + { IPv4(209,234,0,0),18 }, + { IPv4(209,234,64,0),19 }, + { IPv4(209,234,88,0),24 }, + { IPv4(209,234,89,0),24 }, + { IPv4(209,234,96,0),20 }, + { IPv4(209,234,128,0),21 }, + { IPv4(209,234,128,0),24 }, + { IPv4(209,234,130,0),23 }, + { IPv4(209,234,132,0),23 }, + { IPv4(209,234,134,0),24 }, + { IPv4(209,234,136,0),21 }, + { IPv4(209,234,139,0),24 }, + { IPv4(209,234,144,0),21 }, + { IPv4(209,234,147,0),24 }, + { IPv4(209,234,150,0),24 }, + { IPv4(209,234,155,0),24 }, + { IPv4(209,234,168,0),22 }, + { IPv4(209,234,176,0),22 }, + { IPv4(209,234,176,0),21 }, + { IPv4(209,234,184,0),21 }, + { IPv4(209,234,186,0),24 }, + { IPv4(209,234,192,0),21 }, + { IPv4(209,234,194,0),24 }, + { IPv4(209,234,196,0),24 }, + { IPv4(209,234,199,0),24 }, + { IPv4(209,234,216,0),24 }, + { IPv4(209,234,217,0),24 }, + { IPv4(209,234,218,0),23 }, + { IPv4(209,234,218,0),24 }, + { IPv4(209,234,219,0),24 }, + { IPv4(209,234,220,0),22 }, + { IPv4(209,236,0,0),18 }, + { IPv4(209,236,64,0),19 }, + { IPv4(209,236,96,0),19 }, + { IPv4(209,236,170,0),24 }, + { IPv4(209,236,171,0),24 }, + { IPv4(209,236,194,0),24 }, + { IPv4(209,237,0,0),18 }, + { IPv4(209,237,12,0),24 }, + { IPv4(209,237,13,0),24 }, + { IPv4(209,237,14,0),24 }, + { IPv4(209,237,15,0),24 }, + { IPv4(209,237,56,0),23 }, + { IPv4(209,237,58,0),24 }, + { IPv4(209,237,59,0),24 }, + { IPv4(209,237,103,0),24 }, + { IPv4(209,237,104,0),24 }, + { IPv4(209,237,105,0),24 }, + { IPv4(209,237,106,0),24 }, + { IPv4(209,237,107,0),24 }, + { IPv4(209,238,0,0),16 }, + { IPv4(209,239,64,0),19 }, + { IPv4(209,239,80,0),24 }, + { IPv4(209,239,128,0),19 }, + { IPv4(209,240,32,0),19 }, + { IPv4(209,240,96,0),19 }, + { IPv4(209,240,128,0),19 }, + { IPv4(209,240,192,0),19 }, + { IPv4(209,240,198,0),24 }, + { IPv4(209,240,199,0),24 }, + { IPv4(209,240,224,0),19 }, + { IPv4(209,241,0,0),24 }, + { IPv4(209,241,158,0),24 }, + { IPv4(209,241,222,0),23 }, + { IPv4(209,241,234,0),24 }, + { IPv4(209,241,235,0),24 }, + { IPv4(209,241,243,0),24 }, + { IPv4(209,241,244,0),24 }, + { IPv4(209,241,245,0),24 }, + { IPv4(209,242,128,0),19 }, + { IPv4(209,242,160,0),20 }, + { IPv4(209,243,32,0),20 }, + { IPv4(209,243,92,0),24 }, + { IPv4(209,243,94,0),24 }, + { IPv4(209,243,101,0),24 }, + { IPv4(209,243,102,0),23 }, + { IPv4(209,243,107,0),24 }, + { IPv4(209,243,109,0),24 }, + { IPv4(209,243,110,0),24 }, + { IPv4(209,244,203,0),24 }, + { IPv4(209,244,216,0),23 }, + { IPv4(209,245,21,0),24 }, + { IPv4(209,245,89,0),24 }, + { IPv4(209,246,37,0),24 }, + { IPv4(209,246,151,0),24 }, + { IPv4(209,247,96,0),23 }, + { IPv4(209,248,64,0),18 }, + { IPv4(209,249,0,0),16 }, + { IPv4(209,249,2,0),24 }, + { IPv4(209,249,51,0),24 }, + { IPv4(209,249,70,0),24 }, + { IPv4(209,249,76,0),23 }, + { IPv4(209,249,92,0),22 }, + { IPv4(209,249,113,0),24 }, + { IPv4(209,249,114,0),24 }, + { IPv4(209,249,173,0),24 }, + { IPv4(209,249,174,0),24 }, + { IPv4(209,249,239,0),24 }, + { IPv4(209,249,240,0),24 }, + { IPv4(209,249,246,0),23 }, + { IPv4(209,249,250,0),24 }, + { IPv4(209,250,160,0),19 }, + { 
IPv4(209,251,32,0),21 }, + { IPv4(209,251,40,0),21 }, + { IPv4(209,251,192,0),19 }, + { IPv4(210,4,24,0),24 }, + { IPv4(210,4,25,0),24 }, + { IPv4(210,4,26,0),24 }, + { IPv4(210,4,27,0),24 }, + { IPv4(210,4,56,0),24 }, + { IPv4(210,4,128,0),20 }, + { IPv4(210,4,144,0),20 }, + { IPv4(210,7,96,0),19 }, + { IPv4(210,7,104,0),22 }, + { IPv4(210,7,112,0),22 }, + { IPv4(210,7,128,0),19 }, + { IPv4(210,7,160,0),19 }, + { IPv4(210,7,191,0),24 }, + { IPv4(210,7,192,0),19 }, + { IPv4(210,7,199,0),24 }, + { IPv4(210,7,221,0),24 }, + { IPv4(210,7,222,0),24 }, + { IPv4(210,7,224,0),19 }, + { IPv4(210,8,0,0),14 }, + { IPv4(210,8,4,0),23 }, + { IPv4(210,8,30,0),23 }, + { IPv4(210,9,16,0),20 }, + { IPv4(210,9,44,0),22 }, + { IPv4(210,10,124,0),24 }, + { IPv4(210,10,124,0),22 }, + { IPv4(210,10,125,0),24 }, + { IPv4(210,10,126,0),24 }, + { IPv4(210,10,127,0),24 }, + { IPv4(210,12,32,0),19 }, + { IPv4(210,15,80,0),20 }, + { IPv4(210,15,88,0),21 }, + { IPv4(210,16,0,0),20 }, + { IPv4(210,16,0,0),17 }, + { IPv4(210,16,16,0),20 }, + { IPv4(210,16,32,0),20 }, + { IPv4(210,16,48,0),20 }, + { IPv4(210,16,64,0),22 }, + { IPv4(210,16,68,0),22 }, + { IPv4(210,16,72,0),22 }, + { IPv4(210,16,80,0),22 }, + { IPv4(210,16,84,0),22 }, + { IPv4(210,16,100,0),24 }, + { IPv4(210,16,101,0),24 }, + { IPv4(210,16,102,0),24 }, + { IPv4(210,16,103,0),24 }, + { IPv4(210,16,104,0),24 }, + { IPv4(210,16,127,0),24 }, + { IPv4(210,17,0,0),17 }, + { IPv4(210,18,0,0),17 }, + { IPv4(210,18,64,0),20 }, + { IPv4(210,18,64,0),21 }, + { IPv4(210,18,76,0),22 }, + { IPv4(210,18,80,0),20 }, + { IPv4(210,18,88,0),21 }, + { IPv4(210,18,92,0),22 }, + { IPv4(210,18,96,0),20 }, + { IPv4(210,18,96,0),21 }, + { IPv4(210,18,104,0),23 }, + { IPv4(210,19,0,0),18 }, + { IPv4(210,19,0,0),19 }, + { IPv4(210,19,16,0),20 }, + { IPv4(210,19,32,0),19 }, + { IPv4(210,19,48,0),20 }, + { IPv4(210,19,64,0),19 }, + { IPv4(210,19,64,0),18 }, + { IPv4(210,19,96,0),19 }, + { IPv4(210,21,0,0),16 }, + { IPv4(210,22,0,0),16 }, + { IPv4(210,23,96,0),19 }, + { IPv4(210,23,112,0),20 }, + { IPv4(210,23,115,0),24 }, + { IPv4(210,23,128,0),19 }, + { IPv4(210,23,133,0),24 }, + { IPv4(210,23,142,0),23 }, + { IPv4(210,23,144,0),22 }, + { IPv4(210,23,154,0),23 }, + { IPv4(210,23,156,0),23 }, + { IPv4(210,23,192,0),19 }, + { IPv4(210,23,208,0),20 }, + { IPv4(210,23,208,0),22 }, + { IPv4(210,23,235,0),24 }, + { IPv4(210,23,239,0),24 }, + { IPv4(210,23,240,0),20 }, + { IPv4(210,23,253,0),24 }, + { IPv4(210,23,254,0),23 }, + { IPv4(210,24,64,0),18 }, + { IPv4(210,24,208,0),20 }, + { IPv4(210,24,224,0),22 }, + { IPv4(210,25,0,0),17 }, + { IPv4(210,50,30,0),24 }, + { IPv4(210,50,48,0),23 }, + { IPv4(210,50,51,0),24 }, + { IPv4(210,50,52,0),22 }, + { IPv4(210,50,56,0),21 }, + { IPv4(210,50,64,0),20 }, + { IPv4(210,50,104,0),21 }, + { IPv4(210,50,124,0),22 }, + { IPv4(210,51,0,0),16 }, + { IPv4(210,52,0,0),16 }, + { IPv4(210,53,0,0),16 }, + { IPv4(210,54,128,0),17 }, + { IPv4(210,54,211,0),24 }, + { IPv4(210,55,0,0),17 }, + { IPv4(210,55,5,0),24 }, + { IPv4(210,55,111,0),24 }, + { IPv4(210,55,128,0),17 }, + { IPv4(210,55,155,0),24 }, + { IPv4(210,55,157,0),24 }, + { IPv4(210,55,202,0),24 }, + { IPv4(210,55,254,0),24 }, + { IPv4(210,56,0,0),19 }, + { IPv4(210,56,2,0),24 }, + { IPv4(210,56,6,0),24 }, + { IPv4(210,56,7,0),24 }, + { IPv4(210,56,9,0),24 }, + { IPv4(210,56,10,0),24 }, + { IPv4(210,56,15,0),24 }, + { IPv4(210,56,16,0),24 }, + { IPv4(210,56,17,0),24 }, + { IPv4(210,56,18,0),24 }, + { IPv4(210,56,19,0),24 }, + { IPv4(210,56,20,0),24 }, + { IPv4(210,56,21,0),24 }, + { 
IPv4(210,56,22,0),24 }, + { IPv4(210,56,23,0),24 }, + { IPv4(210,58,0,0),16 }, + { IPv4(210,58,0,0),18 }, + { IPv4(210,58,64,0),18 }, + { IPv4(210,58,128,0),18 }, + { IPv4(210,58,192,0),18 }, + { IPv4(210,59,0,0),17 }, + { IPv4(210,60,0,0),16 }, + { IPv4(210,60,225,0),24 }, + { IPv4(210,62,64,0),19 }, + { IPv4(210,62,128,0),19 }, + { IPv4(210,62,160,0),20 }, + { IPv4(210,62,224,0),20 }, + { IPv4(210,62,240,0),21 }, + { IPv4(210,63,64,0),18 }, + { IPv4(210,64,0,0),24 }, + { IPv4(210,64,0,0),16 }, + { IPv4(210,64,0,0),18 }, + { IPv4(210,64,192,0),18 }, + { IPv4(210,66,0,0),16 }, + { IPv4(210,66,64,0),18 }, + { IPv4(210,66,128,0),18 }, + { IPv4(210,67,64,0),19 }, + { IPv4(210,67,248,0),21 }, + { IPv4(210,68,0,0),16 }, + { IPv4(210,68,0,0),24 }, + { IPv4(210,69,0,0),16 }, + { IPv4(210,70,0,0),16 }, + { IPv4(210,71,0,0),17 }, + { IPv4(210,72,0,0),19 }, + { IPv4(210,72,32,0),19 }, + { IPv4(210,72,64,0),18 }, + { IPv4(210,72,128,0),19 }, + { IPv4(210,72,160,0),19 }, + { IPv4(210,72,192,0),19 }, + { IPv4(210,72,224,0),19 }, + { IPv4(210,73,0,0),18 }, + { IPv4(210,73,64,0),19 }, + { IPv4(210,73,96,0),19 }, + { IPv4(210,73,128,0),19 }, + { IPv4(210,73,160,0),19 }, + { IPv4(210,73,224,0),19 }, + { IPv4(210,74,32,0),19 }, + { IPv4(210,74,64,0),23 }, + { IPv4(210,74,160,0),19 }, + { IPv4(210,74,192,0),19 }, + { IPv4(210,74,224,0),19 }, + { IPv4(210,75,32,0),19 }, + { IPv4(210,75,96,0),19 }, + { IPv4(210,75,128,0),19 }, + { IPv4(210,75,192,0),19 }, + { IPv4(210,75,224,0),22 }, + { IPv4(210,75,240,0),20 }, + { IPv4(210,76,32,0),19 }, + { IPv4(210,76,96,0),19 }, + { IPv4(210,76,192,0),19 }, + { IPv4(210,77,0,0),19 }, + { IPv4(210,77,32,0),21 }, + { IPv4(210,77,40,0),21 }, + { IPv4(210,77,48,0),20 }, + { IPv4(210,77,128,0),19 }, + { IPv4(210,77,160,0),20 }, + { IPv4(210,77,192,0),19 }, + { IPv4(210,77,224,0),19 }, + { IPv4(210,77,224,0),20 }, + { IPv4(210,77,240,0),20 }, + { IPv4(210,78,2,0),24 }, + { IPv4(210,78,4,0),22 }, + { IPv4(210,78,8,0),21 }, + { IPv4(210,78,16,0),20 }, + { IPv4(210,78,128,0),19 }, + { IPv4(210,79,224,0),19 }, + { IPv4(210,80,129,0),24 }, + { IPv4(210,82,0,0),16 }, + { IPv4(210,83,0,0),16 }, + { IPv4(210,85,0,0),18 }, + { IPv4(210,85,0,0),16 }, + { IPv4(210,85,64,0),18 }, + { IPv4(210,85,128,0),18 }, + { IPv4(210,85,192,0),18 }, + { IPv4(210,88,0,0),17 }, + { IPv4(210,88,128,0),18 }, + { IPv4(210,88,192,0),19 }, + { IPv4(210,90,0,0),17 }, + { IPv4(210,90,0,0),24 }, + { IPv4(210,90,0,0),16 }, + { IPv4(210,90,21,0),24 }, + { IPv4(210,90,128,0),17 }, + { IPv4(210,91,0,0),16 }, + { IPv4(210,91,8,0),24 }, + { IPv4(210,92,0,0),18 }, + { IPv4(210,92,0,0),24 }, + { IPv4(210,92,1,0),24 }, + { IPv4(210,92,2,0),24 }, + { IPv4(210,92,3,0),24 }, + { IPv4(210,92,4,0),24 }, + { IPv4(210,92,5,0),24 }, + { IPv4(210,92,6,0),24 }, + { IPv4(210,92,7,0),24 }, + { IPv4(210,92,8,0),24 }, + { IPv4(210,92,9,0),24 }, + { IPv4(210,92,10,0),24 }, + { IPv4(210,92,12,0),24 }, + { IPv4(210,92,13,0),24 }, + { IPv4(210,92,14,0),24 }, + { IPv4(210,92,40,0),24 }, + { IPv4(210,92,64,0),18 }, + { IPv4(210,92,73,0),24 }, + { IPv4(210,92,91,0),24 }, + { IPv4(210,92,114,0),24 }, + { IPv4(210,92,127,0),24 }, + { IPv4(210,92,128,0),17 }, + { IPv4(210,93,0,0),17 }, + { IPv4(210,93,6,0),23 }, + { IPv4(210,93,8,0),21 }, + { IPv4(210,93,68,0),24 }, + { IPv4(210,93,69,0),24 }, + { IPv4(210,93,70,0),24 }, + { IPv4(210,93,83,0),24 }, + { IPv4(210,93,84,0),22 }, + { IPv4(210,93,84,0),24 }, + { IPv4(210,93,85,0),24 }, + { IPv4(210,93,86,0),24 }, + { IPv4(210,93,87,0),24 }, + { IPv4(210,93,112,0),20 }, + { IPv4(210,93,128,0),23 }, + 
{ IPv4(210,93,130,0),24 }, + { IPv4(210,93,131,0),24 }, + { IPv4(210,93,132,0),22 }, + { IPv4(210,93,136,0),21 }, + { IPv4(210,93,144,0),20 }, + { IPv4(210,93,160,0),19 }, + { IPv4(210,94,0,0),19 }, + { IPv4(210,94,64,0),18 }, + { IPv4(210,94,128,0),19 }, + { IPv4(210,94,160,0),19 }, + { IPv4(210,94,224,0),19 }, + { IPv4(210,94,245,0),24 }, + { IPv4(210,94,246,0),24 }, + { IPv4(210,95,0,0),17 }, + { IPv4(210,95,0,0),16 }, + { IPv4(210,95,128,0),17 }, + { IPv4(210,95,192,0),24 }, + { IPv4(210,95,193,0),24 }, + { IPv4(210,95,194,0),24 }, + { IPv4(210,95,199,0),24 }, + { IPv4(210,96,0,0),17 }, + { IPv4(210,96,0,0),18 }, + { IPv4(210,96,64,0),18 }, + { IPv4(210,96,128,0),17 }, + { IPv4(210,96,132,0),24 }, + { IPv4(210,96,162,0),24 }, + { IPv4(210,96,163,0),24 }, + { IPv4(210,96,164,0),24 }, + { IPv4(210,96,165,0),24 }, + { IPv4(210,96,166,0),24 }, + { IPv4(210,96,214,0),24 }, + { IPv4(210,96,235,0),24 }, + { IPv4(210,97,0,0),17 }, + { IPv4(210,97,0,0),18 }, + { IPv4(210,97,64,0),18 }, + { IPv4(210,97,68,0),23 }, + { IPv4(210,97,128,0),19 }, + { IPv4(210,97,140,0),23 }, + { IPv4(210,97,142,0),24 }, + { IPv4(210,97,224,0),20 }, + { IPv4(210,97,240,0),20 }, + { IPv4(210,98,0,0),19 }, + { IPv4(210,98,16,0),21 }, + { IPv4(210,98,38,0),24 }, + { IPv4(210,98,39,0),24 }, + { IPv4(210,98,40,0),21 }, + { IPv4(210,98,40,0),22 }, + { IPv4(210,98,45,0),24 }, + { IPv4(210,98,48,0),20 }, + { IPv4(210,98,64,0),18 }, + { IPv4(210,98,128,0),18 }, + { IPv4(210,98,192,0),19 }, + { IPv4(210,98,224,0),19 }, + { IPv4(210,99,0,0),17 }, + { IPv4(210,99,64,0),18 }, + { IPv4(210,99,128,0),18 }, + { IPv4(210,99,128,0),17 }, + { IPv4(210,99,187,0),24 }, + { IPv4(210,99,192,0),18 }, + { IPv4(210,100,0,0),17 }, + { IPv4(210,100,0,0),18 }, + { IPv4(210,100,64,0),18 }, + { IPv4(210,100,128,0),17 }, + { IPv4(210,100,128,0),18 }, + { IPv4(210,100,192,0),18 }, + { IPv4(210,101,0,0),18 }, + { IPv4(210,101,0,0),19 }, + { IPv4(210,101,32,0),19 }, + { IPv4(210,101,64,0),18 }, + { IPv4(210,101,84,0),24 }, + { IPv4(210,101,85,0),24 }, + { IPv4(210,101,128,0),18 }, + { IPv4(210,101,192,0),19 }, + { IPv4(210,101,224,0),20 }, + { IPv4(210,101,240,0),21 }, + { IPv4(210,101,248,0),22 }, + { IPv4(210,101,252,0),23 }, + { IPv4(210,101,254,0),23 }, + { IPv4(210,102,32,0),19 }, + { IPv4(210,102,64,0),19 }, + { IPv4(210,102,96,0),19 }, + { IPv4(210,102,128,0),17 }, + { IPv4(210,102,136,0),22 }, + { IPv4(210,102,208,0),21 }, + { IPv4(210,102,216,0),22 }, + { IPv4(210,103,0,0),18 }, + { IPv4(210,103,0,0),17 }, + { IPv4(210,103,64,0),18 }, + { IPv4(210,103,73,0),24 }, + { IPv4(210,104,0,0),16 }, + { IPv4(210,104,0,0),17 }, + { IPv4(210,104,128,0),24 }, + { IPv4(210,104,128,0),17 }, + { IPv4(210,104,129,0),24 }, + { IPv4(210,104,132,0),22 }, + { IPv4(210,104,132,0),24 }, + { IPv4(210,104,133,0),24 }, + { IPv4(210,104,134,0),24 }, + { IPv4(210,104,135,0),24 }, + { IPv4(210,104,203,0),24 }, + { IPv4(210,105,0,0),16 }, + { IPv4(210,105,108,0),24 }, + { IPv4(210,106,0,0),18 }, + { IPv4(210,106,64,0),18 }, + { IPv4(210,106,76,0),22 }, + { IPv4(210,106,80,0),22 }, + { IPv4(210,106,87,0),24 }, + { IPv4(210,106,96,0),21 }, + { IPv4(210,106,104,0),22 }, + { IPv4(210,106,108,0),23 }, + { IPv4(210,106,128,0),18 }, + { IPv4(210,106,192,0),19 }, + { IPv4(210,106,224,0),19 }, + { IPv4(210,107,0,0),17 }, + { IPv4(210,107,66,0),24 }, + { IPv4(210,107,75,0),24 }, + { IPv4(210,107,128,0),18 }, + { IPv4(210,107,192,0),19 }, + { IPv4(210,107,192,0),20 }, + { IPv4(210,107,199,0),24 }, + { IPv4(210,107,201,0),24 }, + { IPv4(210,107,202,0),24 }, + { 
IPv4(210,107,208,0),24 }, + { IPv4(210,107,209,0),24 }, + { IPv4(210,107,210,0),24 }, + { IPv4(210,107,211,0),24 }, + { IPv4(210,107,212,0),22 }, + { IPv4(210,107,224,0),20 }, + { IPv4(210,107,240,0),22 }, + { IPv4(210,107,240,0),20 }, + { IPv4(210,107,244,0),23 }, + { IPv4(210,108,0,0),16 }, + { IPv4(210,108,26,0),24 }, + { IPv4(210,108,27,0),24 }, + { IPv4(210,108,80,0),22 }, + { IPv4(210,108,84,0),24 }, + { IPv4(210,108,137,0),24 }, + { IPv4(210,108,149,0),24 }, + { IPv4(210,108,230,0),24 }, + { IPv4(210,108,231,0),24 }, + { IPv4(210,109,128,0),22 }, + { IPv4(210,109,149,0),24 }, + { IPv4(210,110,0,0),17 }, + { IPv4(210,110,80,0),21 }, + { IPv4(210,110,88,0),22 }, + { IPv4(210,110,128,0),17 }, + { IPv4(210,110,128,0),18 }, + { IPv4(210,110,136,0),23 }, + { IPv4(210,110,138,0),24 }, + { IPv4(210,110,139,0),24 }, + { IPv4(210,110,140,0),24 }, + { IPv4(210,110,160,0),20 }, + { IPv4(210,110,176,0),22 }, + { IPv4(210,110,180,0),23 }, + { IPv4(210,110,182,0),23 }, + { IPv4(210,110,184,0),21 }, + { IPv4(210,110,192,0),19 }, + { IPv4(210,110,200,0),22 }, + { IPv4(210,110,240,0),22 }, + { IPv4(210,110,248,0),22 }, + { IPv4(210,110,253,0),24 }, + { IPv4(210,111,0,0),17 }, + { IPv4(210,111,0,0),18 }, + { IPv4(210,111,27,0),24 }, + { IPv4(210,111,28,0),24 }, + { IPv4(210,111,36,0),24 }, + { IPv4(210,111,37,0),24 }, + { IPv4(210,111,64,0),18 }, + { IPv4(210,111,192,0),19 }, + { IPv4(210,111,224,0),19 }, + { IPv4(210,112,0,0),17 }, + { IPv4(210,112,128,0),19 }, + { IPv4(210,112,177,0),24 }, + { IPv4(210,113,0,0),16 }, + { IPv4(210,113,104,0),24 }, + { IPv4(210,114,0,0),18 }, + { IPv4(210,114,80,0),20 }, + { IPv4(210,114,96,0),19 }, + { IPv4(210,114,106,0),24 }, + { IPv4(210,114,117,0),24 }, + { IPv4(210,114,128,0),17 }, + { IPv4(210,114,128,0),18 }, + { IPv4(210,114,192,0),18 }, + { IPv4(210,115,0,0),20 }, + { IPv4(210,115,16,0),20 }, + { IPv4(210,115,32,0),19 }, + { IPv4(210,115,128,0),19 }, + { IPv4(210,115,136,0),22 }, + { IPv4(210,115,140,0),22 }, + { IPv4(210,115,150,0),24 }, + { IPv4(210,115,151,0),24 }, + { IPv4(210,115,160,0),19 }, + { IPv4(210,115,192,0),19 }, + { IPv4(210,115,192,0),24 }, + { IPv4(210,115,193,0),24 }, + { IPv4(210,115,194,0),24 }, + { IPv4(210,115,222,0),24 }, + { IPv4(210,115,224,0),19 }, + { IPv4(210,116,128,0),17 }, + { IPv4(210,117,0,0),17 }, + { IPv4(210,117,128,0),18 }, + { IPv4(210,117,192,0),18 }, + { IPv4(210,118,128,0),17 }, + { IPv4(210,118,128,0),18 }, + { IPv4(210,118,175,0),24 }, + { IPv4(210,118,177,0),24 }, + { IPv4(210,118,192,0),18 }, + { IPv4(210,119,0,0),20 }, + { IPv4(210,119,0,0),18 }, + { IPv4(210,119,0,0),17 }, + { IPv4(210,119,16,0),22 }, + { IPv4(210,119,20,0),23 }, + { IPv4(210,119,22,0),24 }, + { IPv4(210,119,24,0),21 }, + { IPv4(210,119,32,0),22 }, + { IPv4(210,119,64,0),20 }, + { IPv4(210,119,76,0),22 }, + { IPv4(210,119,80,0),22 }, + { IPv4(210,119,102,0),23 }, + { IPv4(210,119,104,0),21 }, + { IPv4(210,119,112,0),24 }, + { IPv4(210,119,114,0),24 }, + { IPv4(210,119,116,0),24 }, + { IPv4(210,119,117,0),24 }, + { IPv4(210,119,118,0),24 }, + { IPv4(210,119,119,0),24 }, + { IPv4(210,119,120,0),24 }, + { IPv4(210,119,121,0),24 }, + { IPv4(210,119,122,0),24 }, + { IPv4(210,119,128,0),17 }, + { IPv4(210,119,188,0),22 }, + { IPv4(210,120,0,0),16 }, + { IPv4(210,120,14,0),24 }, + { IPv4(210,120,25,0),24 }, + { IPv4(210,120,73,0),24 }, + { IPv4(210,120,88,0),24 }, + { IPv4(210,120,89,0),24 }, + { IPv4(210,120,90,0),24 }, + { IPv4(210,121,0,0),17 }, + { IPv4(210,121,128,0),17 }, + { IPv4(210,121,183,0),24 }, + { IPv4(210,121,223,0),24 }, + { 
IPv4(210,122,16,0),20 }, + { IPv4(210,122,68,0),24 }, + { IPv4(210,122,94,0),24 }, + { IPv4(210,122,96,0),24 }, + { IPv4(210,122,97,0),24 }, + { IPv4(210,122,98,0),24 }, + { IPv4(210,122,99,0),24 }, + { IPv4(210,122,100,0),24 }, + { IPv4(210,122,101,0),24 }, + { IPv4(210,123,0,0),16 }, + { IPv4(210,123,14,0),24 }, + { IPv4(210,123,80,0),24 }, + { IPv4(210,123,108,0),24 }, + { IPv4(210,123,121,0),24 }, + { IPv4(210,124,0,0),16 }, + { IPv4(210,124,12,0),24 }, + { IPv4(210,124,13,0),24 }, + { IPv4(210,124,36,0),24 }, + { IPv4(210,124,155,0),24 }, + { IPv4(210,124,169,0),24 }, + { IPv4(210,124,170,0),24 }, + { IPv4(210,124,204,0),24 }, + { IPv4(210,124,205,0),24 }, + { IPv4(210,125,0,0),24 }, + { IPv4(210,125,0,0),18 }, + { IPv4(210,125,1,0),24 }, + { IPv4(210,125,2,0),24 }, + { IPv4(210,125,3,0),24 }, + { IPv4(210,125,4,0),24 }, + { IPv4(210,125,5,0),24 }, + { IPv4(210,125,6,0),24 }, + { IPv4(210,125,7,0),24 }, + { IPv4(210,125,16,0),20 }, + { IPv4(210,125,56,0),24 }, + { IPv4(210,125,56,0),22 }, + { IPv4(210,125,57,0),24 }, + { IPv4(210,125,58,0),24 }, + { IPv4(210,125,60,0),22 }, + { IPv4(210,125,60,0),23 }, + { IPv4(210,125,62,0),23 }, + { IPv4(210,125,64,0),19 }, + { IPv4(210,125,64,0),22 }, + { IPv4(210,125,64,0),21 }, + { IPv4(210,125,68,0),22 }, + { IPv4(210,125,72,0),22 }, + { IPv4(210,125,76,0),22 }, + { IPv4(210,125,82,0),24 }, + { IPv4(210,125,84,0),22 }, + { IPv4(210,125,88,0),21 }, + { IPv4(210,125,96,0),21 }, + { IPv4(210,125,104,0),22 }, + { IPv4(210,125,108,0),22 }, + { IPv4(210,125,112,0),20 }, + { IPv4(210,125,128,0),17 }, + { IPv4(210,125,160,0),24 }, + { IPv4(210,125,161,0),24 }, + { IPv4(210,125,162,0),24 }, + { IPv4(210,125,176,0),21 }, + { IPv4(210,125,184,0),24 }, + { IPv4(210,125,192,0),24 }, + { IPv4(210,125,193,0),24 }, + { IPv4(210,125,194,0),24 }, + { IPv4(210,125,195,0),24 }, + { IPv4(210,125,196,0),24 }, + { IPv4(210,125,197,0),24 }, + { IPv4(210,125,198,0),24 }, + { IPv4(210,125,199,0),24 }, + { IPv4(210,125,240,0),21 }, + { IPv4(210,126,0,0),17 }, + { IPv4(210,126,140,0),24 }, + { IPv4(210,126,155,0),24 }, + { IPv4(210,126,206,0),24 }, + { IPv4(210,127,0,0),19 }, + { IPv4(210,127,32,0),24 }, + { IPv4(210,127,33,0),24 }, + { IPv4(210,127,34,0),23 }, + { IPv4(210,127,36,0),24 }, + { IPv4(210,127,37,0),24 }, + { IPv4(210,127,38,0),23 }, + { IPv4(210,127,40,0),24 }, + { IPv4(210,127,41,0),24 }, + { IPv4(210,127,42,0),23 }, + { IPv4(210,127,44,0),24 }, + { IPv4(210,127,45,0),24 }, + { IPv4(210,127,46,0),23 }, + { IPv4(210,127,48,0),23 }, + { IPv4(210,127,50,0),24 }, + { IPv4(210,127,51,0),24 }, + { IPv4(210,127,52,0),22 }, + { IPv4(210,127,128,0),18 }, + { IPv4(210,127,192,0),18 }, + { IPv4(210,127,197,0),24 }, + { IPv4(210,127,201,0),24 }, + { IPv4(210,127,208,0),21 }, + { IPv4(210,127,216,0),22 }, + { IPv4(210,127,220,0),23 }, + { IPv4(210,127,233,0),24 }, + { IPv4(210,131,128,0),17 }, + { IPv4(210,132,64,0),19 }, + { IPv4(210,132,96,0),19 }, + { IPv4(210,133,96,0),19 }, + { IPv4(210,133,192,0),18 }, + { IPv4(210,134,128,0),19 }, + { IPv4(210,134,160,0),19 }, + { IPv4(210,134,224,0),19 }, + { IPv4(210,135,0,0),20 }, + { IPv4(210,135,32,0),20 }, + { IPv4(210,135,48,0),21 }, + { IPv4(210,135,56,0),22 }, + { IPv4(210,135,192,0),20 }, + { IPv4(210,135,208,0),20 }, + { IPv4(210,141,0,0),20 }, + { IPv4(210,141,32,0),19 }, + { IPv4(210,141,64,0),19 }, + { IPv4(210,141,96,0),19 }, + { IPv4(210,141,128,0),19 }, + { IPv4(210,141,160,0),19 }, + { IPv4(210,142,0,0),18 }, + { IPv4(210,142,224,0),19 }, + { IPv4(210,143,64,0),19 }, + { IPv4(210,143,96,0),19 }, + { 
IPv4(210,146,64,0),18 }, + { IPv4(210,146,128,0),17 }, + { IPv4(210,147,0,0),16 }, + { IPv4(210,151,128,0),17 }, + { IPv4(210,155,0,0),17 }, + { IPv4(210,155,128,0),19 }, + { IPv4(210,157,128,0),20 }, + { IPv4(210,157,128,0),19 }, + { IPv4(210,157,160,0),24 }, + { IPv4(210,157,161,0),24 }, + { IPv4(210,157,162,0),24 }, + { IPv4(210,157,163,0),24 }, + { IPv4(210,157,166,0),24 }, + { IPv4(210,157,168,0),24 }, + { IPv4(210,157,171,0),24 }, + { IPv4(210,157,176,0),24 }, + { IPv4(210,157,178,0),24 }, + { IPv4(210,157,179,0),24 }, + { IPv4(210,157,180,0),24 }, + { IPv4(210,157,181,0),24 }, + { IPv4(210,157,182,0),24 }, + { IPv4(210,157,192,0),19 }, + { IPv4(210,157,224,0),19 }, + { IPv4(210,158,64,0),19 }, + { IPv4(210,158,224,0),19 }, + { IPv4(210,159,64,0),19 }, + { IPv4(210,166,32,0),19 }, + { IPv4(210,166,64,0),19 }, + { IPv4(210,166,224,0),19 }, + { IPv4(210,168,192,0),18 }, + { IPv4(210,169,0,0),17 }, + { IPv4(210,170,0,0),18 }, + { IPv4(210,171,0,0),19 }, + { IPv4(210,171,0,0),20 }, + { IPv4(210,171,64,0),19 }, + { IPv4(210,171,192,0),19 }, + { IPv4(210,171,224,0),19 }, + { IPv4(210,172,64,0),18 }, + { IPv4(210,172,192,0),19 }, + { IPv4(210,172,224,0),20 }, + { IPv4(210,172,238,0),23 }, + { IPv4(210,173,0,0),19 }, + { IPv4(210,173,128,0),22 }, + { IPv4(210,174,64,0),18 }, + { IPv4(210,174,128,0),17 }, + { IPv4(210,175,128,0),23 }, + { IPv4(210,175,138,0),23 }, + { IPv4(210,175,152,0),21 }, + { IPv4(210,175,160,0),19 }, + { IPv4(210,175,164,0),24 }, + { IPv4(210,175,179,0),24 }, + { IPv4(210,175,188,0),24 }, + { IPv4(210,178,0,0),17 }, + { IPv4(210,178,0,0),16 }, + { IPv4(210,178,128,0),17 }, + { IPv4(210,179,0,0),17 }, + { IPv4(210,179,0,0),16 }, + { IPv4(210,179,128,0),17 }, + { IPv4(210,180,0,0),19 }, + { IPv4(210,180,64,0),19 }, + { IPv4(210,180,96,0),19 }, + { IPv4(210,180,128,0),18 }, + { IPv4(210,180,192,0),19 }, + { IPv4(210,180,224,0),19 }, + { IPv4(210,181,0,0),19 }, + { IPv4(210,181,28,0),24 }, + { IPv4(210,181,32,0),19 }, + { IPv4(210,181,64,0),18 }, + { IPv4(210,181,128,0),18 }, + { IPv4(210,181,142,0),23 }, + { IPv4(210,181,144,0),22 }, + { IPv4(210,181,148,0),22 }, + { IPv4(210,181,164,0),23 }, + { IPv4(210,181,166,0),24 }, + { IPv4(210,181,188,0),23 }, + { IPv4(210,181,190,0),24 }, + { IPv4(210,181,192,0),19 }, + { IPv4(210,182,0,0),16 }, + { IPv4(210,182,144,0),24 }, + { IPv4(210,183,0,0),16 }, + { IPv4(210,186,0,0),16 }, + { IPv4(210,186,0,0),17 }, + { IPv4(210,187,0,0),16 }, + { IPv4(210,188,192,0),19 }, + { IPv4(210,189,160,0),21 }, + { IPv4(210,191,64,0),18 }, + { IPv4(210,191,192,0),19 }, + { IPv4(210,192,0,0),18 }, + { IPv4(210,192,96,0),19 }, + { IPv4(210,193,0,0),20 }, + { IPv4(210,193,0,0),19 }, + { IPv4(210,195,0,0),16 }, + { IPv4(210,196,0,0),16 }, + { IPv4(210,198,128,0),17 }, + { IPv4(210,199,96,0),19 }, + { IPv4(210,200,32,0),19 }, + { IPv4(210,204,0,0),16 }, + { IPv4(210,204,0,0),17 }, + { IPv4(210,204,128,0),17 }, + { IPv4(210,204,248,0),22 }, + { IPv4(210,204,252,0),22 }, + { IPv4(210,205,0,0),18 }, + { IPv4(210,205,64,0),19 }, + { IPv4(210,205,128,0),17 }, + { IPv4(210,205,236,0),24 }, + { IPv4(210,206,0,0),16 }, + { IPv4(210,207,0,0),16 }, + { IPv4(210,207,87,0),24 }, + { IPv4(210,207,195,0),24 }, + { IPv4(210,207,198,0),24 }, + { IPv4(210,207,199,0),24 }, + { IPv4(210,208,0,0),21 }, + { IPv4(210,208,8,0),21 }, + { IPv4(210,208,32,0),19 }, + { IPv4(210,208,80,0),20 }, + { IPv4(210,208,96,0),19 }, + { IPv4(210,208,160,0),19 }, + { IPv4(210,209,0,0),18 }, + { IPv4(210,210,0,0),20 }, + { IPv4(210,210,32,0),22 }, + { IPv4(210,210,36,0),22 }, + { 
IPv4(210,210,39,0),24 }, + { IPv4(210,210,40,0),22 }, + { IPv4(210,210,40,0),24 }, + { IPv4(210,210,41,0),24 }, + { IPv4(210,210,43,0),24 }, + { IPv4(210,210,44,0),22 }, + { IPv4(210,210,48,0),24 }, + { IPv4(210,210,49,0),24 }, + { IPv4(210,214,0,0),20 }, + { IPv4(210,214,4,0),23 }, + { IPv4(210,214,16,0),21 }, + { IPv4(210,214,24,0),21 }, + { IPv4(210,214,48,0),20 }, + { IPv4(210,214,64,0),23 }, + { IPv4(210,214,64,0),20 }, + { IPv4(210,214,80,0),20 }, + { IPv4(210,214,96,0),21 }, + { IPv4(210,214,106,0),24 }, + { IPv4(210,214,108,0),22 }, + { IPv4(210,214,112,0),20 }, + { IPv4(210,214,128,0),20 }, + { IPv4(210,214,144,0),24 }, + { IPv4(210,214,144,0),21 }, + { IPv4(210,214,145,0),24 }, + { IPv4(210,214,146,0),24 }, + { IPv4(210,214,147,0),24 }, + { IPv4(210,214,149,0),24 }, + { IPv4(210,214,152,0),21 }, + { IPv4(210,214,160,0),20 }, + { IPv4(210,214,165,0),24 }, + { IPv4(210,214,168,0),23 }, + { IPv4(210,214,172,0),22 }, + { IPv4(210,214,178,0),24 }, + { IPv4(210,214,184,0),21 }, + { IPv4(210,214,192,0),20 }, + { IPv4(210,214,197,0),24 }, + { IPv4(210,214,208,0),20 }, + { IPv4(210,214,240,0),20 }, + { IPv4(210,216,0,0),16 }, + { IPv4(210,216,13,0),24 }, + { IPv4(210,217,0,0),17 }, + { IPv4(210,217,128,0),19 }, + { IPv4(210,217,160,0),19 }, + { IPv4(210,217,183,0),24 }, + { IPv4(210,217,192,0),19 }, + { IPv4(210,217,224,0),19 }, + { IPv4(210,218,0,0),18 }, + { IPv4(210,218,64,0),19 }, + { IPv4(210,218,128,0),18 }, + { IPv4(210,218,192,0),19 }, + { IPv4(210,218,195,0),24 }, + { IPv4(210,218,224,0),19 }, + { IPv4(210,219,0,0),19 }, + { IPv4(210,219,32,0),22 }, + { IPv4(210,219,36,0),24 }, + { IPv4(210,219,37,0),24 }, + { IPv4(210,219,38,0),23 }, + { IPv4(210,219,40,0),21 }, + { IPv4(210,219,48,0),20 }, + { IPv4(210,219,128,0),18 }, + { IPv4(210,220,0,0),19 }, + { IPv4(210,220,13,0),24 }, + { IPv4(210,220,21,0),24 }, + { IPv4(210,220,22,0),24 }, + { IPv4(210,220,32,0),21 }, + { IPv4(210,220,40,0),22 }, + { IPv4(210,220,64,0),19 }, + { IPv4(210,220,96,0),19 }, + { IPv4(210,220,128,0),19 }, + { IPv4(210,220,160,0),19 }, + { IPv4(210,221,0,0),17 }, + { IPv4(210,221,128,0),19 }, + { IPv4(210,221,160,0),20 }, + { IPv4(210,221,176,0),20 }, + { IPv4(210,221,192,0),19 }, + { IPv4(210,221,224,0),19 }, + { IPv4(210,222,0,0),15 }, + { IPv4(210,224,160,0),19 }, + { IPv4(210,224,192,0),18 }, + { IPv4(210,229,160,0),19 }, + { IPv4(210,230,0,0),17 }, + { IPv4(210,230,70,0),23 }, + { IPv4(210,230,72,0),23 }, + { IPv4(210,230,74,0),24 }, + { IPv4(210,230,128,0),17 }, + { IPv4(210,233,0,0),18 }, + { IPv4(210,234,0,0),16 }, + { IPv4(210,235,160,0),19 }, + { IPv4(210,236,0,0),19 }, + { IPv4(210,236,32,0),20 }, + { IPv4(210,236,64,0),19 }, + { IPv4(210,236,128,0),19 }, + { IPv4(210,236,192,0),19 }, + { IPv4(210,237,32,0),19 }, + { IPv4(210,238,32,0),19 }, + { IPv4(210,238,128,0),17 }, + { IPv4(210,239,96,0),19 }, + { IPv4(210,239,128,0),17 }, + { IPv4(210,240,0,0),16 }, + { IPv4(210,241,0,0),17 }, + { IPv4(210,241,128,0),19 }, + { IPv4(210,241,160,0),24 }, + { IPv4(210,241,192,0),19 }, + { IPv4(210,243,0,0),18 }, + { IPv4(210,243,128,0),17 }, + { IPv4(210,243,128,0),22 }, + { IPv4(210,243,182,0),23 }, + { IPv4(210,243,192,0),18 }, + { IPv4(210,244,0,0),18 }, + { IPv4(210,244,0,0),17 }, + { IPv4(210,244,28,0),22 }, + { IPv4(210,244,128,0),18 }, + { IPv4(210,244,192,0),20 }, + { IPv4(210,244,208,0),20 }, + { IPv4(210,244,224,0),19 }, + { IPv4(210,249,0,0),17 }, + { IPv4(210,249,224,0),19 }, + { IPv4(210,251,0,0),17 }, + { IPv4(210,251,192,0),20 }, + { IPv4(210,251,224,0),22 }, + { IPv4(210,252,0,0),18 }, + { 
IPv4(210,253,32,0),19 }, + { IPv4(210,253,192,0),19 }, + { IPv4(210,255,0,0),17 }, + { IPv4(210,255,160,0),19 }, + { IPv4(210,255,224,0),19 }, + { IPv4(211,1,0,0),22 }, + { IPv4(211,1,128,0),19 }, + { IPv4(211,1,160,0),23 }, + { IPv4(211,1,192,0),19 }, + { IPv4(211,2,160,0),19 }, + { IPv4(211,4,0,0),16 }, + { IPv4(211,4,243,0),24 }, + { IPv4(211,5,0,0),16 }, + { IPv4(211,7,96,0),19 }, + { IPv4(211,7,224,0),19 }, + { IPv4(211,9,64,0),18 }, + { IPv4(211,12,192,0),22 }, + { IPv4(211,12,192,0),19 }, + { IPv4(211,12,196,0),22 }, + { IPv4(211,12,200,0),22 }, + { IPv4(211,12,208,0),22 }, + { IPv4(211,12,212,0),22 }, + { IPv4(211,12,216,0),22 }, + { IPv4(211,12,224,0),19 }, + { IPv4(211,13,0,0),17 }, + { IPv4(211,13,160,0),19 }, + { IPv4(211,13,224,0),19 }, + { IPv4(211,14,192,0),19 }, + { IPv4(211,14,224,0),19 }, + { IPv4(211,15,64,0),19 }, + { IPv4(211,18,0,0),16 }, + { IPv4(211,23,0,0),16 }, + { IPv4(211,32,0,0),16 }, + { IPv4(211,32,7,0),24 }, + { IPv4(211,32,32,0),24 }, + { IPv4(211,32,160,0),24 }, + { IPv4(211,33,0,0),17 }, + { IPv4(211,33,128,0),17 }, + { IPv4(211,33,130,0),24 }, + { IPv4(211,34,0,0),16 }, + { IPv4(211,34,0,0),17 }, + { IPv4(211,34,110,0),24 }, + { IPv4(211,34,111,0),24 }, + { IPv4(211,34,112,0),24 }, + { IPv4(211,34,128,0),17 }, + { IPv4(211,35,0,0),18 }, + { IPv4(211,35,42,0),24 }, + { IPv4(211,35,43,0),24 }, + { IPv4(211,35,64,0),19 }, + { IPv4(211,35,128,0),17 }, + { IPv4(211,36,0,0),22 }, + { IPv4(211,36,96,0),19 }, + { IPv4(211,36,128,0),19 }, + { IPv4(211,36,160,0),19 }, + { IPv4(211,36,192,0),22 }, + { IPv4(211,36,196,0),22 }, + { IPv4(211,36,200,0),22 }, + { IPv4(211,36,204,0),22 }, + { IPv4(211,36,208,0),20 }, + { IPv4(211,36,224,0),19 }, + { IPv4(211,37,0,0),17 }, + { IPv4(211,37,93,0),24 }, + { IPv4(211,37,128,0),18 }, + { IPv4(211,37,192,0),19 }, + { IPv4(211,37,224,0),19 }, + { IPv4(211,38,0,0),16 }, + { IPv4(211,38,9,0),24 }, + { IPv4(211,39,64,0),18 }, + { IPv4(211,39,90,0),24 }, + { IPv4(211,39,128,0),22 }, + { IPv4(211,39,132,0),22 }, + { IPv4(211,39,136,0),22 }, + { IPv4(211,39,140,0),22 }, + { IPv4(211,39,144,0),20 }, + { IPv4(211,39,192,0),19 }, + { IPv4(211,39,224,0),19 }, + { IPv4(211,40,0,0),16 }, + { IPv4(211,41,0,0),19 }, + { IPv4(211,41,32,0),19 }, + { IPv4(211,41,64,0),19 }, + { IPv4(211,41,89,0),24 }, + { IPv4(211,41,90,0),24 }, + { IPv4(211,41,96,0),19 }, + { IPv4(211,41,128,0),19 }, + { IPv4(211,41,160,0),19 }, + { IPv4(211,41,192,0),19 }, + { IPv4(211,41,224,0),22 }, + { IPv4(211,41,228,0),22 }, + { IPv4(211,41,232,0),22 }, + { IPv4(211,41,236,0),22 }, + { IPv4(211,41,240,0),22 }, + { IPv4(211,41,244,0),22 }, + { IPv4(211,41,248,0),22 }, + { IPv4(211,41,252,0),22 }, + { IPv4(211,42,0,0),17 }, + { IPv4(211,42,10,0),24 }, + { IPv4(211,42,16,0),24 }, + { IPv4(211,42,23,0),24 }, + { IPv4(211,42,72,0),22 }, + { IPv4(211,42,84,0),23 }, + { IPv4(211,42,86,0),24 }, + { IPv4(211,42,116,0),22 }, + { IPv4(211,42,120,0),24 }, + { IPv4(211,42,124,0),22 }, + { IPv4(211,42,128,0),19 }, + { IPv4(211,42,160,0),19 }, + { IPv4(211,42,192,0),19 }, + { IPv4(211,42,224,0),19 }, + { IPv4(211,43,3,0),24 }, + { IPv4(211,43,4,0),24 }, + { IPv4(211,43,7,0),24 }, + { IPv4(211,43,8,0),21 }, + { IPv4(211,43,15,0),24 }, + { IPv4(211,43,16,0),21 }, + { IPv4(211,43,20,0),24 }, + { IPv4(211,43,21,0),24 }, + { IPv4(211,43,22,0),24 }, + { IPv4(211,43,23,0),24 }, + { IPv4(211,43,29,0),24 }, + { IPv4(211,43,30,0),23 }, + { IPv4(211,43,32,0),19 }, + { IPv4(211,43,64,0),21 }, + { IPv4(211,43,72,0),22 }, + { IPv4(211,43,76,0),22 }, + { IPv4(211,43,80,0),20 }, + { 
IPv4(211,43,96,0),20 }, + { IPv4(211,43,112,0),21 }, + { IPv4(211,43,120,0),24 }, + { IPv4(211,43,128,0),20 }, + { IPv4(211,43,144,0),20 }, + { IPv4(211,44,0,0),16 }, + { IPv4(211,44,62,0),24 }, + { IPv4(211,45,32,0),19 }, + { IPv4(211,45,64,0),19 }, + { IPv4(211,45,96,0),19 }, + { IPv4(211,45,128,0),18 }, + { IPv4(211,45,192,0),18 }, + { IPv4(211,46,0,0),17 }, + { IPv4(211,46,0,0),16 }, + { IPv4(211,46,128,0),17 }, + { IPv4(211,47,0,0),18 }, + { IPv4(211,47,80,0),20 }, + { IPv4(211,47,96,0),19 }, + { IPv4(211,47,128,0),19 }, + { IPv4(211,47,160,0),24 }, + { IPv4(211,47,161,0),24 }, + { IPv4(211,47,162,0),24 }, + { IPv4(211,47,176,0),20 }, + { IPv4(211,47,192,0),19 }, + { IPv4(211,47,224,0),19 }, + { IPv4(211,48,0,0),16 }, + { IPv4(211,49,0,0),17 }, + { IPv4(211,49,128,0),17 }, + { IPv4(211,50,0,0),16 }, + { IPv4(211,51,0,0),16 }, + { IPv4(211,51,28,0),24 }, + { IPv4(211,51,39,0),24 }, + { IPv4(211,52,0,0),18 }, + { IPv4(211,52,128,0),17 }, + { IPv4(211,53,213,0),24 }, + { IPv4(211,54,0,0),15 }, + { IPv4(211,54,123,0),24 }, + { IPv4(211,55,45,0),24 }, + { IPv4(211,55,46,0),24 }, + { IPv4(211,56,0,0),17 }, + { IPv4(211,56,102,0),23 }, + { IPv4(211,56,128,0),18 }, + { IPv4(211,56,192,0),19 }, + { IPv4(211,56,224,0),19 }, + { IPv4(211,57,0,0),16 }, + { IPv4(211,57,0,0),17 }, + { IPv4(211,57,128,0),17 }, + { IPv4(211,58,0,0),16 }, + { IPv4(211,58,248,0),24 }, + { IPv4(211,58,249,0),24 }, + { IPv4(211,59,0,0),16 }, + { IPv4(211,60,0,0),16 }, + { IPv4(211,60,213,0),24 }, + { IPv4(211,61,0,0),18 }, + { IPv4(211,61,48,0),24 }, + { IPv4(211,61,49,0),24 }, + { IPv4(211,61,50,0),24 }, + { IPv4(211,61,51,0),24 }, + { IPv4(211,61,64,0),19 }, + { IPv4(211,61,64,0),18 }, + { IPv4(211,61,96,0),19 }, + { IPv4(211,61,128,0),17 }, + { IPv4(211,61,247,0),24 }, + { IPv4(211,62,0,0),18 }, + { IPv4(211,62,64,0),18 }, + { IPv4(211,62,103,0),24 }, + { IPv4(211,62,128,0),17 }, + { IPv4(211,63,0,0),19 }, + { IPv4(211,63,32,0),19 }, + { IPv4(211,63,64,0),19 }, + { IPv4(211,63,96,0),19 }, + { IPv4(211,63,128,0),17 }, + { IPv4(211,63,175,0),24 }, + { IPv4(211,72,0,0),16 }, + { IPv4(211,73,0,0),19 }, + { IPv4(211,73,96,0),19 }, + { IPv4(211,73,128,0),19 }, + { IPv4(211,73,160,0),20 }, + { IPv4(211,73,192,0),18 }, + { IPv4(211,74,0,0),16 }, + { IPv4(211,74,0,0),18 }, + { IPv4(211,74,64,0),18 }, + { IPv4(211,74,128,0),18 }, + { IPv4(211,74,146,0),24 }, + { IPv4(211,74,150,0),24 }, + { IPv4(211,74,192,0),18 }, + { IPv4(211,74,192,0),19 }, + { IPv4(211,74,224,0),19 }, + { IPv4(211,76,0,0),19 }, + { IPv4(211,76,64,0),20 }, + { IPv4(211,76,80,0),20 }, + { IPv4(211,76,112,0),20 }, + { IPv4(211,76,128,0),20 }, + { IPv4(211,77,0,0),18 }, + { IPv4(211,77,64,0),18 }, + { IPv4(211,77,128,0),17 }, + { IPv4(211,78,0,0),20 }, + { IPv4(211,78,16,0),20 }, + { IPv4(211,78,32,0),20 }, + { IPv4(211,78,48,0),21 }, + { IPv4(211,78,48,0),20 }, + { IPv4(211,78,56,0),21 }, + { IPv4(211,78,80,0),22 }, + { IPv4(211,78,96,0),19 }, + { IPv4(211,78,96,0),24 }, + { IPv4(211,78,97,0),24 }, + { IPv4(211,78,160,0),19 }, + { IPv4(211,79,32,0),20 }, + { IPv4(211,79,128,0),19 }, + { IPv4(211,79,160,0),19 }, + { IPv4(211,79,192,0),20 }, + { IPv4(211,79,208,0),20 }, + { IPv4(211,79,240,0),20 }, + { IPv4(211,88,0,0),16 }, + { IPv4(211,98,0,0),17 }, + { IPv4(211,99,0,0),19 }, + { IPv4(211,99,32,0),19 }, + { IPv4(211,99,64,0),19 }, + { IPv4(211,99,160,0),19 }, + { IPv4(211,99,192,0),19 }, + { IPv4(211,99,224,0),19 }, + { IPv4(211,100,0,0),19 }, + { IPv4(211,100,32,0),19 }, + { IPv4(211,100,64,0),19 }, + { IPv4(211,100,96,0),19 }, + { IPv4(211,101,0,0),18 }, + { 
IPv4(211,101,128,0),17 }, + { IPv4(211,101,128,0),19 }, + { IPv4(211,101,160,0),19 }, + { IPv4(211,101,192,0),19 }, + { IPv4(211,101,224,0),19 }, + { IPv4(211,102,0,0),19 }, + { IPv4(211,102,32,0),19 }, + { IPv4(211,102,64,0),19 }, + { IPv4(211,102,96,0),19 }, + { IPv4(211,104,0,0),14 }, + { IPv4(211,104,34,0),24 }, + { IPv4(211,106,22,0),24 }, + { IPv4(211,108,0,0),16 }, + { IPv4(211,109,0,0),16 }, + { IPv4(211,110,0,0),16 }, + { IPv4(211,111,0,0),17 }, + { IPv4(211,111,128,0),20 }, + { IPv4(211,111,144,0),20 }, + { IPv4(211,111,160,0),20 }, + { IPv4(211,111,176,0),20 }, + { IPv4(211,111,192,0),20 }, + { IPv4(211,111,208,0),20 }, + { IPv4(211,111,224,0),19 }, + { IPv4(211,112,64,0),19 }, + { IPv4(211,112,96,0),19 }, + { IPv4(211,112,128,0),17 }, + { IPv4(211,112,166,0),23 }, + { IPv4(211,113,0,0),17 }, + { IPv4(211,113,1,0),24 }, + { IPv4(211,113,128,0),18 }, + { IPv4(211,113,128,0),17 }, + { IPv4(211,113,192,0),18 }, + { IPv4(211,114,0,0),16 }, + { IPv4(211,114,0,0),17 }, + { IPv4(211,114,35,0),24 }, + { IPv4(211,114,45,0),24 }, + { IPv4(211,114,98,0),24 }, + { IPv4(211,114,99,0),24 }, + { IPv4(211,114,128,0),17 }, + { IPv4(211,115,0,0),19 }, + { IPv4(211,115,32,0),19 }, + { IPv4(211,115,64,0),19 }, + { IPv4(211,115,128,0),18 }, + { IPv4(211,115,192,0),19 }, + { IPv4(211,115,224,0),19 }, + { IPv4(211,116,0,0),18 }, + { IPv4(211,116,128,0),19 }, + { IPv4(211,116,160,0),22 }, + { IPv4(211,116,176,0),20 }, + { IPv4(211,116,224,0),19 }, + { IPv4(211,117,0,0),16 }, + { IPv4(211,118,0,0),16 }, + { IPv4(211,118,128,0),24 }, + { IPv4(211,119,0,0),16 }, + { IPv4(211,120,224,0),20 }, + { IPv4(211,125,192,0),22 }, + { IPv4(211,125,200,0),21 }, + { IPv4(211,125,208,0),21 }, + { IPv4(211,126,0,0),16 }, + { IPv4(211,128,254,0),23 }, + { IPv4(211,130,96,0),19 }, + { IPv4(211,132,96,0),19 }, + { IPv4(211,133,144,0),20 }, + { IPv4(211,133,160,0),19 }, + { IPv4(211,133,224,0),20 }, + { IPv4(211,134,0,0),16 }, + { IPv4(211,135,128,0),17 }, + { IPv4(211,144,0,0),20 }, + { IPv4(211,144,32,0),20 }, + { IPv4(211,144,224,0),19 }, + { IPv4(211,146,0,0),16 }, + { IPv4(211,147,0,0),19 }, + { IPv4(211,147,32,0),19 }, + { IPv4(211,147,96,0),19 }, + { IPv4(211,148,128,0),19 }, + { IPv4(211,149,0,0),16 }, + { IPv4(211,151,0,0),17 }, + { IPv4(211,151,128,0),18 }, + { IPv4(211,152,64,0),19 }, + { IPv4(211,152,96,0),19 }, + { IPv4(211,152,128,0),20 }, + { IPv4(211,152,144,0),21 }, + { IPv4(211,152,184,0),21 }, + { IPv4(211,152,192,0),19 }, + { IPv4(211,154,32,0),19 }, + { IPv4(211,154,192,0),18 }, + { IPv4(211,155,16,0),20 }, + { IPv4(211,155,32,0),19 }, + { IPv4(211,155,64,0),19 }, + { IPv4(211,155,160,0),20 }, + { IPv4(211,155,224,0),20 }, + { IPv4(211,156,0,0),19 }, + { IPv4(211,156,128,0),19 }, + { IPv4(211,156,160,0),19 }, + { IPv4(211,157,32,0),19 }, + { IPv4(211,157,64,0),19 }, + { IPv4(211,159,64,0),20 }, + { IPv4(211,159,80,0),20 }, + { IPv4(211,159,96,0),19 }, + { IPv4(211,160,0,0),16 }, + { IPv4(211,164,0,0),16 }, + { IPv4(211,165,0,0),16 }, + { IPv4(211,166,0,0),16 }, + { IPv4(211,167,0,0),19 }, + { IPv4(211,167,64,0),19 }, + { IPv4(211,167,128,0),19 }, + { IPv4(211,167,160,0),20 }, + { IPv4(211,167,192,0),19 }, + { IPv4(211,167,224,0),19 }, + { IPv4(211,168,0,0),16 }, + { IPv4(211,168,255,0),24 }, + { IPv4(211,169,0,0),16 }, + { IPv4(211,169,225,0),24 }, + { IPv4(211,170,0,0),16 }, + { IPv4(211,171,0,0),16 }, + { IPv4(211,171,206,0),24 }, + { IPv4(211,172,0,0),18 }, + { IPv4(211,172,80,0),22 }, + { IPv4(211,172,84,0),22 }, + { IPv4(211,172,88,0),22 }, + { IPv4(211,172,92,0),22 }, + { IPv4(211,172,112,0),20 
}, + { IPv4(211,172,128,0),20 }, + { IPv4(211,172,144,0),20 }, + { IPv4(211,172,176,0),21 }, + { IPv4(211,172,184,0),21 }, + { IPv4(211,172,192,0),20 }, + { IPv4(211,172,224,0),20 }, + { IPv4(211,172,240,0),20 }, + { IPv4(211,173,0,0),17 }, + { IPv4(211,173,17,0),24 }, + { IPv4(211,173,78,0),23 }, + { IPv4(211,174,0,0),20 }, + { IPv4(211,174,32,0),21 }, + { IPv4(211,174,40,0),22 }, + { IPv4(211,174,44,0),23 }, + { IPv4(211,174,46,0),23 }, + { IPv4(211,174,48,0),20 }, + { IPv4(211,174,64,0),19 }, + { IPv4(211,174,96,0),19 }, + { IPv4(211,175,0,0),16 }, + { IPv4(211,175,239,0),24 }, + { IPv4(211,176,0,0),14 }, + { IPv4(211,176,30,0),24 }, + { IPv4(211,180,0,0),16 }, + { IPv4(211,181,0,0),16 }, + { IPv4(211,181,249,0),24 }, + { IPv4(211,182,0,0),16 }, + { IPv4(211,183,0,0),16 }, + { IPv4(211,183,106,0),24 }, + { IPv4(211,183,107,0),24 }, + { IPv4(211,183,108,0),24 }, + { IPv4(211,184,0,0),15 }, + { IPv4(211,184,0,0),16 }, + { IPv4(211,185,0,0),16 }, + { IPv4(211,186,0,0),16 }, + { IPv4(211,187,0,0),16 }, + { IPv4(211,188,0,0),17 }, + { IPv4(211,188,32,0),24 }, + { IPv4(211,188,128,0),17 }, + { IPv4(211,189,128,0),19 }, + { IPv4(211,189,160,0),19 }, + { IPv4(211,189,192,0),19 }, + { IPv4(211,189,224,0),19 }, + { IPv4(211,190,0,0),17 }, + { IPv4(211,190,0,0),16 }, + { IPv4(211,190,30,0),24 }, + { IPv4(211,190,31,0),24 }, + { IPv4(211,190,128,0),17 }, + { IPv4(211,191,0,0),16 }, + { IPv4(211,191,0,0),17 }, + { IPv4(211,191,128,0),17 }, + { IPv4(211,192,0,0),24 }, + { IPv4(211,192,0,0),13 }, + { IPv4(211,192,45,0),24 }, + { IPv4(211,192,169,0),24 }, + { IPv4(211,195,85,0),24 }, + { IPv4(211,200,0,0),13 }, + { IPv4(211,205,67,0),24 }, + { IPv4(211,205,77,0),24 }, + { IPv4(211,208,0,0),14 }, + { IPv4(211,212,0,0),14 }, + { IPv4(211,214,60,0),24 }, + { IPv4(211,214,68,0),24 }, + { IPv4(211,214,70,0),24 }, + { IPv4(211,216,0,0),13 }, + { IPv4(211,216,216,0),24 }, + { IPv4(211,217,8,0),24 }, + { IPv4(211,217,9,0),24 }, + { IPv4(211,217,10,0),24 }, + { IPv4(211,217,11,0),24 }, + { IPv4(211,217,12,0),24 }, + { IPv4(211,217,13,0),24 }, + { IPv4(211,217,14,0),24 }, + { IPv4(211,217,15,0),24 }, + { IPv4(211,217,16,0),24 }, + { IPv4(211,217,17,0),24 }, + { IPv4(211,217,18,0),24 }, + { IPv4(211,217,20,0),24 }, + { IPv4(211,217,21,0),24 }, + { IPv4(211,217,22,0),24 }, + { IPv4(211,218,235,0),24 }, + { IPv4(211,218,236,0),24 }, + { IPv4(211,218,237,0),24 }, + { IPv4(211,219,24,0),24 }, + { IPv4(211,219,66,0),24 }, + { IPv4(211,224,0,0),13 }, + { IPv4(211,226,77,0),24 }, + { IPv4(211,232,0,0),17 }, + { IPv4(211,232,128,0),18 }, + { IPv4(211,233,0,0),18 }, + { IPv4(211,233,64,0),20 }, + { IPv4(211,233,80,0),20 }, + { IPv4(211,233,128,0),18 }, + { IPv4(211,234,0,0),17 }, + { IPv4(211,234,128,0),17 }, + { IPv4(211,235,192,0),21 }, + { IPv4(211,235,200,0),21 }, + { IPv4(211,235,208,0),22 }, + { IPv4(211,235,212,0),23 }, + { IPv4(211,235,214,0),23 }, + { IPv4(211,235,216,0),22 }, + { IPv4(211,235,220,0),23 }, + { IPv4(211,235,222,0),24 }, + { IPv4(211,235,223,0),24 }, + { IPv4(211,235,224,0),19 }, + { IPv4(211,236,0,0),19 }, + { IPv4(211,236,32,0),19 }, + { IPv4(211,236,64,0),18 }, + { IPv4(211,236,128,0),19 }, + { IPv4(211,236,160,0),19 }, + { IPv4(211,236,192,0),19 }, + { IPv4(211,237,0,0),21 }, + { IPv4(211,237,8,0),22 }, + { IPv4(211,237,12,0),23 }, + { IPv4(211,237,14,0),23 }, + { IPv4(211,237,16,0),20 }, + { IPv4(211,237,32,0),20 }, + { IPv4(211,237,48,0),20 }, + { IPv4(211,237,64,0),20 }, + { IPv4(211,237,80,0),20 }, + { IPv4(211,237,96,0),20 }, + { IPv4(211,237,112,0),20 }, + { IPv4(211,237,128,0),20 }, + { 
IPv4(211,237,176,0),20 }, + { IPv4(211,237,192,0),20 }, + { IPv4(211,237,224,0),20 }, + { IPv4(211,237,240,0),20 }, + { IPv4(211,238,0,0),20 }, + { IPv4(211,238,16,0),21 }, + { IPv4(211,238,24,0),21 }, + { IPv4(211,238,32,0),19 }, + { IPv4(211,238,55,0),24 }, + { IPv4(211,238,64,0),19 }, + { IPv4(211,238,96,0),19 }, + { IPv4(211,238,128,0),19 }, + { IPv4(211,238,160,0),20 }, + { IPv4(211,238,176,0),20 }, + { IPv4(211,238,176,0),21 }, + { IPv4(211,238,192,0),20 }, + { IPv4(211,238,224,0),22 }, + { IPv4(211,238,228,0),22 }, + { IPv4(211,238,232,0),22 }, + { IPv4(211,238,236,0),22 }, + { IPv4(211,239,0,0),17 }, + { IPv4(211,239,128,0),18 }, + { IPv4(211,240,128,0),17 }, + { IPv4(211,241,0,0),17 }, + { IPv4(211,242,0,0),17 }, + { IPv4(211,242,20,0),24 }, + { IPv4(211,242,21,0),24 }, + { IPv4(211,242,128,0),18 }, + { IPv4(211,242,141,0),24 }, + { IPv4(211,243,0,0),16 }, + { IPv4(211,245,0,0),17 }, + { IPv4(211,245,128,0),17 }, + { IPv4(211,246,0,0),15 }, + { IPv4(211,248,0,0),17 }, + { IPv4(211,248,0,0),16 }, + { IPv4(211,248,128,0),17 }, + { IPv4(211,250,0,0),15 }, + { IPv4(211,250,0,0),16 }, + { IPv4(211,251,0,0),16 }, + { IPv4(211,251,216,0),21 }, + { IPv4(211,252,0,0),16 }, + { IPv4(211,252,0,0),15 }, + { IPv4(211,252,208,0),21 }, + { IPv4(211,252,220,0),24 }, + { IPv4(211,253,0,0),16 }, + { IPv4(211,254,0,0),17 }, + { IPv4(211,254,128,0),18 }, + { IPv4(211,255,0,0),19 }, + { IPv4(211,255,32,0),19 }, + { IPv4(211,255,64,0),21 }, + { IPv4(211,255,72,0),23 }, + { IPv4(211,255,74,0),23 }, + { IPv4(211,255,76,0),22 }, + { IPv4(211,255,80,0),22 }, + { IPv4(211,255,84,0),24 }, + { IPv4(211,255,85,0),24 }, + { IPv4(211,255,86,0),23 }, + { IPv4(211,255,88,0),22 }, + { IPv4(211,255,92,0),23 }, + { IPv4(211,255,94,0),23 }, + { IPv4(211,255,114,0),23 }, + { IPv4(211,255,116,0),22 }, + { IPv4(211,255,120,0),23 }, + { IPv4(211,255,121,0),24 }, + { IPv4(211,255,126,0),24 }, + { IPv4(211,255,127,0),24 }, + { IPv4(211,255,128,0),19 }, + { IPv4(211,255,160,0),19 }, + { IPv4(211,255,192,0),21 }, + { IPv4(211,255,200,0),21 }, + { IPv4(211,255,224,0),20 }, + { IPv4(211,255,240,0),20 }, + { IPv4(212,0,128,0),19 }, + { IPv4(212,1,0,0),19 }, + { IPv4(212,3,32,0),19 }, + { IPv4(212,3,44,0),23 }, + { IPv4(212,3,160,0),19 }, + { IPv4(212,4,64,0),19 }, + { IPv4(212,4,208,0),24 }, + { IPv4(212,7,32,0),19 }, + { IPv4(212,7,64,0),19 }, + { IPv4(212,8,0,0),19 }, + { IPv4(212,8,160,0),19 }, + { IPv4(212,9,96,0),22 }, + { IPv4(212,9,96,0),19 }, + { IPv4(212,9,160,0),19 }, + { IPv4(212,11,224,0),19 }, + { IPv4(212,12,128,0),19 }, + { IPv4(212,14,64,0),19 }, + { IPv4(212,14,89,0),24 }, + { IPv4(212,14,96,0),19 }, + { IPv4(212,15,32,0),21 }, + { IPv4(212,15,64,0),19 }, + { IPv4(212,15,128,0),19 }, + { IPv4(212,16,32,0),19 }, + { IPv4(212,16,96,0),19 }, + { IPv4(212,16,192,0),19 }, + { IPv4(212,16,202,0),24 }, + { IPv4(212,16,208,0),24 }, + { IPv4(212,16,209,0),24 }, + { IPv4(212,16,212,0),24 }, + { IPv4(212,17,32,0),19 }, + { IPv4(212,17,128,0),19 }, + { IPv4(212,17,192,0),19 }, + { IPv4(212,18,96,0),19 }, + { IPv4(212,18,192,0),19 }, + { IPv4(212,19,32,0),19 }, + { IPv4(212,20,160,0),19 }, + { IPv4(212,20,224,0),19 }, + { IPv4(212,20,228,0),24 }, + { IPv4(212,21,0,0),19 }, + { IPv4(212,21,64,0),19 }, + { IPv4(212,21,96,0),19 }, + { IPv4(212,21,192,0),19 }, + { IPv4(212,21,196,0),22 }, + { IPv4(212,21,200,0),23 }, + { IPv4(212,21,202,0),23 }, + { IPv4(212,22,32,0),19 }, + { IPv4(212,22,96,0),24 }, + { IPv4(212,22,160,0),19 }, + { IPv4(212,22,192,0),19 }, + { IPv4(212,23,0,0),19 }, + { IPv4(212,23,128,0),19 }, + { 
IPv4(212,24,64,0),19 }, + { IPv4(212,24,192,0),19 }, + { IPv4(212,25,0,0),19 }, + { IPv4(212,25,224,0),19 }, + { IPv4(212,28,0,0),20 }, + { IPv4(212,28,16,0),20 }, + { IPv4(212,28,96,0),19 }, + { IPv4(212,30,0,0),19 }, + { IPv4(212,30,32,0),19 }, + { IPv4(212,31,0,0),19 }, + { IPv4(212,31,64,0),19 }, + { IPv4(212,31,106,0),24 }, + { IPv4(212,31,107,0),24 }, + { IPv4(212,31,128,0),19 }, + { IPv4(212,31,192,0),21 }, + { IPv4(212,31,200,0),21 }, + { IPv4(212,31,206,0),23 }, + { IPv4(212,32,224,0),19 }, + { IPv4(212,33,32,0),19 }, + { IPv4(212,33,166,0),24 }, + { IPv4(212,34,160,0),19 }, + { IPv4(212,35,32,0),19 }, + { IPv4(212,35,128,0),19 }, + { IPv4(212,36,32,0),19 }, + { IPv4(212,36,96,0),19 }, + { IPv4(212,37,32,0),19 }, + { IPv4(212,37,64,0),19 }, + { IPv4(212,38,160,0),19 }, + { IPv4(212,39,224,0),19 }, + { IPv4(212,40,224,0),19 }, + { IPv4(212,41,64,0),18 }, + { IPv4(212,41,128,0),19 }, + { IPv4(212,41,224,0),19 }, + { IPv4(212,42,64,0),19 }, + { IPv4(212,42,224,0),19 }, + { IPv4(212,43,128,0),19 }, + { IPv4(212,43,160,0),19 }, + { IPv4(212,43,192,0),18 }, + { IPv4(212,44,0,0),18 }, + { IPv4(212,44,160,0),19 }, + { IPv4(212,46,64,0),19 }, + { IPv4(212,46,128,0),19 }, + { IPv4(212,47,160,0),19 }, + { IPv4(212,47,170,0),23 }, + { IPv4(212,47,172,0),23 }, + { IPv4(212,48,32,0),19 }, + { IPv4(212,48,224,0),19 }, + { IPv4(212,49,128,0),19 }, + { IPv4(212,49,128,0),18 }, + { IPv4(212,49,160,0),19 }, + { IPv4(212,49,192,0),19 }, + { IPv4(212,49,224,0),19 }, + { IPv4(212,50,160,0),19 }, + { IPv4(212,51,32,0),19 }, + { IPv4(212,53,0,0),19 }, + { IPv4(212,54,160,0),19 }, + { IPv4(212,54,160,0),24 }, + { IPv4(212,54,167,0),24 }, + { IPv4(212,54,170,0),24 }, + { IPv4(212,54,179,0),24 }, + { IPv4(212,54,185,0),24 }, + { IPv4(212,55,153,0),24 }, + { IPv4(212,56,0,0),19 }, + { IPv4(212,56,0,0),24 }, + { IPv4(212,56,1,0),24 }, + { IPv4(212,56,2,0),24 }, + { IPv4(212,56,3,0),24 }, + { IPv4(212,56,5,0),24 }, + { IPv4(212,56,6,0),24 }, + { IPv4(212,56,7,0),24 }, + { IPv4(212,56,9,0),24 }, + { IPv4(212,56,13,0),24 }, + { IPv4(212,56,14,0),24 }, + { IPv4(212,56,16,0),22 }, + { IPv4(212,56,22,0),23 }, + { IPv4(212,56,24,0),21 }, + { IPv4(212,56,160,0),19 }, + { IPv4(212,56,224,0),19 }, + { IPv4(212,57,0,0),19 }, + { IPv4(212,57,34,0),24 }, + { IPv4(212,58,32,0),19 }, + { IPv4(212,58,128,0),19 }, + { IPv4(212,58,146,0),24 }, + { IPv4(212,59,64,0),19 }, + { IPv4(212,59,192,0),19 }, + { IPv4(212,60,32,0),19 }, + { IPv4(212,60,96,0),19 }, + { IPv4(212,61,0,0),16 }, + { IPv4(212,66,64,0),19 }, + { IPv4(212,66,128,0),19 }, + { IPv4(212,67,96,0),20 }, + { IPv4(212,67,112,0),22 }, + { IPv4(212,67,116,0),24 }, + { IPv4(212,67,118,0),24 }, + { IPv4(212,67,118,0),23 }, + { IPv4(212,67,192,0),19 }, + { IPv4(212,68,160,0),19 }, + { IPv4(212,68,192,0),19 }, + { IPv4(212,69,160,0),19 }, + { IPv4(212,70,64,0),19 }, + { IPv4(212,70,96,0),19 }, + { IPv4(212,70,192,0),19 }, + { IPv4(212,71,192,0),19 }, + { IPv4(212,71,224,0),22 }, + { IPv4(212,71,240,0),22 }, + { IPv4(212,71,252,0),22 }, + { IPv4(212,73,32,0),19 }, + { IPv4(212,75,224,0),19 }, + { IPv4(212,76,128,0),19 }, + { IPv4(212,76,224,0),19 }, + { IPv4(212,77,160,0),19 }, + { IPv4(212,77,224,0),19 }, + { IPv4(212,79,128,0),19 }, + { IPv4(212,79,224,0),19 }, + { IPv4(212,80,96,0),19 }, + { IPv4(212,82,0,0),19 }, + { IPv4(212,82,224,0),19 }, + { IPv4(212,83,32,0),19 }, + { IPv4(212,84,96,0),19 }, + { IPv4(212,85,224,0),19 }, + { IPv4(212,86,32,0),19 }, + { IPv4(212,86,64,0),19 }, + { IPv4(212,88,0,0),19 }, + { IPv4(212,88,96,0),24 }, + { IPv4(212,88,97,0),24 }, + { 
IPv4(212,88,98,0),24 }, + { IPv4(212,88,99,0),24 }, + { IPv4(212,88,128,0),19 }, + { IPv4(212,88,160,0),19 }, + { IPv4(212,88,192,0),19 }, + { IPv4(212,88,224,0),19 }, + { IPv4(212,89,64,0),22 }, + { IPv4(212,89,69,0),24 }, + { IPv4(212,89,82,0),24 }, + { IPv4(212,89,83,0),24 }, + { IPv4(212,89,128,0),19 }, + { IPv4(212,89,160,0),19 }, + { IPv4(212,89,164,0),24 }, + { IPv4(212,90,32,0),20 }, + { IPv4(212,90,64,0),19 }, + { IPv4(212,91,0,0),19 }, + { IPv4(212,92,64,0),19 }, + { IPv4(212,92,192,0),22 }, + { IPv4(212,93,0,0),19 }, + { IPv4(212,95,144,0),20 }, + { IPv4(212,95,192,0),19 }, + { IPv4(212,95,224,0),19 }, + { IPv4(212,96,0,0),23 }, + { IPv4(212,96,2,0),24 }, + { IPv4(212,96,3,0),24 }, + { IPv4(212,96,4,0),24 }, + { IPv4(212,96,5,0),24 }, + { IPv4(212,96,6,0),23 }, + { IPv4(212,96,8,0),21 }, + { IPv4(212,96,16,0),21 }, + { IPv4(212,96,24,0),22 }, + { IPv4(212,96,30,0),24 }, + { IPv4(212,96,128,0),19 }, + { IPv4(212,97,0,0),19 }, + { IPv4(212,97,160,0),19 }, + { IPv4(212,98,0,0),20 }, + { IPv4(212,98,16,0),20 }, + { IPv4(212,98,64,0),18 }, + { IPv4(212,98,128,0),24 }, + { IPv4(212,98,128,0),19 }, + { IPv4(212,98,129,0),24 }, + { IPv4(212,98,130,0),24 }, + { IPv4(212,98,134,0),24 }, + { IPv4(212,98,135,0),24 }, + { IPv4(212,98,136,0),24 }, + { IPv4(212,98,139,0),24 }, + { IPv4(212,98,140,0),24 }, + { IPv4(212,98,141,0),24 }, + { IPv4(212,98,142,0),24 }, + { IPv4(212,98,143,0),24 }, + { IPv4(212,98,144,0),24 }, + { IPv4(212,98,145,0),24 }, + { IPv4(212,98,146,0),24 }, + { IPv4(212,98,147,0),24 }, + { IPv4(212,98,148,0),24 }, + { IPv4(212,98,149,0),24 }, + { IPv4(212,98,150,0),24 }, + { IPv4(212,98,151,0),24 }, + { IPv4(212,98,152,0),24 }, + { IPv4(212,98,153,0),24 }, + { IPv4(212,98,154,0),24 }, + { IPv4(212,98,157,0),24 }, + { IPv4(212,98,158,0),24 }, + { IPv4(212,98,200,0),21 }, + { IPv4(212,98,201,0),24 }, + { IPv4(212,98,202,0),24 }, + { IPv4(212,98,219,0),24 }, + { IPv4(212,98,223,0),24 }, + { IPv4(212,98,234,0),24 }, + { IPv4(212,98,245,0),24 }, + { IPv4(212,98,252,0),24 }, + { IPv4(212,100,0,0),19 }, + { IPv4(212,100,64,0),24 }, + { IPv4(212,100,66,0),24 }, + { IPv4(212,100,67,0),24 }, + { IPv4(212,100,68,0),22 }, + { IPv4(212,100,96,0),24 }, + { IPv4(212,100,97,0),24 }, + { IPv4(212,100,98,0),24 }, + { IPv4(212,101,192,0),19 }, + { IPv4(212,102,67,0),24 }, + { IPv4(212,102,68,0),24 }, + { IPv4(212,102,192,0),20 }, + { IPv4(212,102,224,0),19 }, + { IPv4(212,103,32,0),19 }, + { IPv4(212,103,64,0),19 }, + { IPv4(212,104,64,0),21 }, + { IPv4(212,104,72,0),22 }, + { IPv4(212,104,76,0),22 }, + { IPv4(212,104,80,0),21 }, + { IPv4(212,105,128,0),19 }, + { IPv4(212,106,192,0),18 }, + { IPv4(212,107,0,0),19 }, + { IPv4(212,108,0,0),19 }, + { IPv4(212,108,8,0),21 }, + { IPv4(212,108,10,0),23 }, + { IPv4(212,108,14,0),23 }, + { IPv4(212,108,16,0),22 }, + { IPv4(212,108,20,0),22 }, + { IPv4(212,108,64,0),19 }, + { IPv4(212,108,128,0),20 }, + { IPv4(212,108,160,0),19 }, + { IPv4(212,109,224,0),19 }, + { IPv4(212,110,32,0),19 }, + { IPv4(212,110,67,0),24 }, + { IPv4(212,110,68,0),24 }, + { IPv4(212,110,69,0),24 }, + { IPv4(212,110,70,0),24 }, + { IPv4(212,110,71,0),24 }, + { IPv4(212,110,72,0),24 }, + { IPv4(212,110,73,0),24 }, + { IPv4(212,110,75,0),24 }, + { IPv4(212,110,76,0),24 }, + { IPv4(212,110,77,0),24 }, + { IPv4(212,110,78,0),24 }, + { IPv4(212,110,79,0),24 }, + { IPv4(212,110,80,0),24 }, + { IPv4(212,110,81,0),24 }, + { IPv4(212,110,82,0),24 }, + { IPv4(212,110,83,0),24 }, + { IPv4(212,110,84,0),24 }, + { IPv4(212,110,85,0),24 }, + { IPv4(212,110,91,0),24 }, + { 
IPv4(212,110,92,0),24 }, + { IPv4(212,110,93,0),24 }, + { IPv4(212,110,94,0),24 }, + { IPv4(212,110,95,0),24 }, + { IPv4(212,111,32,0),19 }, + { IPv4(212,113,32,0),19 }, + { IPv4(212,113,36,0),22 }, + { IPv4(212,113,40,0),22 }, + { IPv4(212,113,64,0),19 }, + { IPv4(212,113,192,0),19 }, + { IPv4(212,113,224,0),19 }, + { IPv4(212,114,96,0),19 }, + { IPv4(212,115,0,0),19 }, + { IPv4(212,115,32,0),19 }, + { IPv4(212,115,96,0),19 }, + { IPv4(212,115,192,0),19 }, + { IPv4(212,117,64,0),19 }, + { IPv4(212,118,64,0),19 }, + { IPv4(212,118,160,0),19 }, + { IPv4(212,120,32,0),19 }, + { IPv4(212,120,64,0),18 }, + { IPv4(212,120,224,0),20 }, + { IPv4(212,121,0,0),19 }, + { IPv4(212,122,96,0),19 }, + { IPv4(212,125,96,0),19 }, + { IPv4(212,126,128,0),19 }, + { IPv4(212,126,192,0),19 }, + { IPv4(212,127,0,0),19 }, + { IPv4(212,127,16,0),20 }, + { IPv4(212,127,21,0),24 }, + { IPv4(212,127,22,0),24 }, + { IPv4(212,127,23,0),24 }, + { IPv4(212,127,24,0),21 }, + { IPv4(212,127,32,0),19 }, + { IPv4(212,129,64,0),24 }, + { IPv4(212,132,0,0),16 }, + { IPv4(212,133,128,0),18 }, + { IPv4(212,140,0,0),16 }, + { IPv4(212,147,0,0),17 }, + { IPv4(212,148,236,0),24 }, + { IPv4(212,161,128,0),17 }, + { IPv4(212,162,64,0),18 }, + { IPv4(212,162,192,0),20 }, + { IPv4(212,162,195,0),24 }, + { IPv4(212,162,196,0),24 }, + { IPv4(212,162,200,0),22 }, + { IPv4(212,162,208,0),20 }, + { IPv4(212,162,216,0),22 }, + { IPv4(212,162,224,0),22 }, + { IPv4(212,162,228,0),22 }, + { IPv4(212,162,232,0),24 }, + { IPv4(212,162,235,0),24 }, + { IPv4(212,162,236,0),22 }, + { IPv4(212,162,240,0),20 }, + { IPv4(212,162,240,0),24 }, + { IPv4(212,163,0,0),16 }, + { IPv4(212,163,0,0),17 }, + { IPv4(212,163,35,0),24 }, + { IPv4(212,163,36,0),24 }, + { IPv4(212,163,200,0),22 }, + { IPv4(212,166,64,0),19 }, + { IPv4(212,166,96,0),19 }, + { IPv4(212,166,128,0),17 }, + { IPv4(212,168,0,0),16 }, + { IPv4(212,169,0,0),18 }, + { IPv4(212,173,0,0),16 }, + { IPv4(212,183,192,0),18 }, + { IPv4(212,188,128,0),19 }, + { IPv4(212,188,160,0),19 }, + { IPv4(212,188,176,0),20 }, + { IPv4(212,189,0,0),17 }, + { IPv4(212,189,128,0),17 }, + { IPv4(212,197,128,0),19 }, + { IPv4(212,203,0,0),19 }, + { IPv4(212,204,0,0),19 }, + { IPv4(212,204,128,0),18 }, + { IPv4(212,207,0,0),16 }, + { IPv4(212,211,128,0),17 }, + { IPv4(212,225,0,0),17 }, + { IPv4(212,228,0,0),15 }, + { IPv4(212,230,0,0),15 }, + { IPv4(212,236,0,0),16 }, + { IPv4(212,238,0,0),16 }, + { IPv4(212,239,0,0),17 }, + { IPv4(212,239,128,0),17 }, + { IPv4(212,240,0,0),16 }, + { IPv4(212,241,64,0),18 }, + { IPv4(212,250,0,0),16 }, + { IPv4(212,252,0,0),16 }, + { IPv4(212,252,0,0),15 }, + { IPv4(212,252,0,0),22 }, + { IPv4(212,252,4,0),22 }, + { IPv4(212,252,168,0),23 }, + { IPv4(212,252,172,0),22 }, + { IPv4(212,253,0,0),22 }, + { IPv4(212,253,0,0),17 }, + { IPv4(212,253,4,0),22 }, + { IPv4(212,253,128,0),18 }, + { IPv4(212,253,192,0),18 }, + { IPv4(213,1,0,0),16 }, + { IPv4(213,2,0,0),16 }, + { IPv4(213,2,216,0),21 }, + { IPv4(213,2,224,0),20 }, + { IPv4(213,2,224,0),19 }, + { IPv4(213,5,128,0),17 }, + { IPv4(213,9,128,0),17 }, + { IPv4(213,9,193,0),24 }, + { IPv4(213,10,0,0),16 }, + { IPv4(213,15,0,0),17 }, + { IPv4(213,15,128,0),18 }, + { IPv4(213,15,192,0),19 }, + { IPv4(213,15,224,0),19 }, + { IPv4(213,18,0,0),16 }, + { IPv4(213,31,0,0),16 }, + { IPv4(213,31,192,0),22 }, + { IPv4(213,33,76,0),24 }, + { IPv4(213,35,0,0),16 }, + { IPv4(213,35,96,0),19 }, + { IPv4(213,37,0,0),16 }, + { IPv4(213,43,0,0),16 }, + { IPv4(213,48,0,0),16 }, + { IPv4(213,51,0,0),16 }, + { IPv4(213,52,128,0),19 }, + { 
IPv4(213,52,128,0),17 }, + { IPv4(213,52,160,0),21 }, + { IPv4(213,52,192,0),21 }, + { IPv4(213,60,0,0),16 }, + { IPv4(213,63,0,0),17 }, + { IPv4(213,68,222,0),23 }, + { IPv4(213,69,21,0),24 }, + { IPv4(213,72,0,0),16 }, + { IPv4(213,74,0,0),16 }, + { IPv4(213,75,0,0),16 }, + { IPv4(213,78,10,0),23 }, + { IPv4(213,78,12,0),22 }, + { IPv4(213,78,16,0),20 }, + { IPv4(213,78,32,0),21 }, + { IPv4(213,78,40,0),22 }, + { IPv4(213,81,0,0),17 }, + { IPv4(213,83,0,0),18 }, + { IPv4(213,92,0,0),17 }, + { IPv4(213,94,0,0),18 }, + { IPv4(213,104,0,0),14 }, + { IPv4(213,109,0,0),16 }, + { IPv4(213,112,0,0),14 }, + { IPv4(213,120,0,0),14 }, + { IPv4(213,120,83,0),24 }, + { IPv4(213,120,89,0),24 }, + { IPv4(213,120,90,0),24 }, + { IPv4(213,121,50,0),24 }, + { IPv4(213,128,32,0),19 }, + { IPv4(213,128,128,0),19 }, + { IPv4(213,129,64,0),19 }, + { IPv4(213,130,32,0),19 }, + { IPv4(213,130,160,0),19 }, + { IPv4(213,133,32,0),19 }, + { IPv4(213,133,64,0),19 }, + { IPv4(213,136,0,0),19 }, + { IPv4(213,136,128,0),19 }, + { IPv4(213,137,0,0),20 }, + { IPv4(213,137,16,0),20 }, + { IPv4(213,137,64,0),19 }, + { IPv4(213,137,65,0),24 }, + { IPv4(213,137,71,0),24 }, + { IPv4(213,137,73,0),24 }, + { IPv4(213,137,79,0),24 }, + { IPv4(213,137,95,0),24 }, + { IPv4(213,137,128,0),22 }, + { IPv4(213,137,160,0),22 }, + { IPv4(213,137,164,0),22 }, + { IPv4(213,137,168,0),22 }, + { IPv4(213,137,176,0),22 }, + { IPv4(213,137,180,0),22 }, + { IPv4(213,137,184,0),22 }, + { IPv4(213,137,188,0),22 }, + { IPv4(213,137,192,0),19 }, + { IPv4(213,138,0,0),19 }, + { IPv4(213,138,160,0),19 }, + { IPv4(213,141,0,0),19 }, + { IPv4(213,141,64,0),19 }, + { IPv4(213,142,0,0),19 }, + { IPv4(213,142,128,0),19 }, + { IPv4(213,142,160,0),19 }, + { IPv4(213,143,0,0),19 }, + { IPv4(213,143,128,0),19 }, + { IPv4(213,144,32,0),19 }, + { IPv4(213,145,128,0),19 }, + { IPv4(213,147,0,0),19 }, + { IPv4(213,147,64,0),24 }, + { IPv4(213,147,65,0),24 }, + { IPv4(213,147,66,0),24 }, + { IPv4(213,147,160,0),19 }, + { IPv4(213,148,64,0),21 }, + { IPv4(213,153,128,0),17 }, + { IPv4(213,154,0,0),19 }, + { IPv4(213,154,147,0),24 }, + { IPv4(213,156,0,0),19 }, + { IPv4(213,156,68,0),24 }, + { IPv4(213,156,69,0),24 }, + { IPv4(213,156,70,0),24 }, + { IPv4(213,156,73,0),24 }, + { IPv4(213,157,64,0),19 }, + { IPv4(213,157,192,0),19 }, + { IPv4(213,160,32,0),19 }, + { IPv4(213,160,64,0),19 }, + { IPv4(213,160,192,0),19 }, + { IPv4(213,160,199,0),24 }, + { IPv4(213,161,64,0),19 }, + { IPv4(213,161,70,0),24 }, + { IPv4(213,161,128,0),20 }, + { IPv4(213,161,144,0),20 }, + { IPv4(213,161,192,0),19 }, + { IPv4(213,164,128,0),19 }, + { IPv4(213,166,14,0),24 }, + { IPv4(213,166,17,0),24 }, + { IPv4(213,166,20,0),24 }, + { IPv4(213,166,21,0),24 }, + { IPv4(213,166,22,0),24 }, + { IPv4(213,166,23,0),24 }, + { IPv4(213,166,24,0),24 }, + { IPv4(213,166,29,0),24 }, + { IPv4(213,166,70,0),24 }, + { IPv4(213,166,74,0),24 }, + { IPv4(213,166,75,0),24 }, + { IPv4(213,166,76,0),24 }, + { IPv4(213,166,77,0),24 }, + { IPv4(213,166,78,0),24 }, + { IPv4(213,166,79,0),24 }, + { IPv4(213,166,80,0),24 }, + { IPv4(213,167,160,0),19 }, + { IPv4(213,170,50,0),23 }, + { IPv4(213,170,192,0),19 }, + { IPv4(213,171,192,0),19 }, + { IPv4(213,172,32,0),19 }, + { IPv4(213,174,192,0),19 }, + { IPv4(213,177,160,0),21 }, + { IPv4(213,178,128,0),19 }, + { IPv4(213,178,160,0),19 }, + { IPv4(213,181,64,0),19 }, + { IPv4(213,182,0,0),19 }, + { IPv4(213,185,0,0),19 }, + { IPv4(213,185,64,0),19 }, + { IPv4(213,185,128,0),19 }, + { IPv4(213,185,192,0),19 }, + { IPv4(213,186,128,0),19 }, + { 
IPv4(213,190,0,0),19 }, + { IPv4(213,191,0,0),19 }, + { IPv4(213,193,64,0),18 }, + { IPv4(213,194,0,0),18 }, + { IPv4(213,199,144,0),20 }, + { IPv4(213,200,0,0),19 }, + { IPv4(213,200,1,0),24 }, + { IPv4(213,201,0,0),17 }, + { IPv4(213,204,64,0),18 }, + { IPv4(213,207,0,0),18 }, + { IPv4(213,207,64,0),18 }, + { IPv4(213,208,64,0),18 }, + { IPv4(213,209,128,0),19 }, + { IPv4(213,212,128,0),18 }, + { IPv4(213,213,0,0),18 }, + { IPv4(213,213,64,0),18 }, + { IPv4(213,217,0,0),19 }, + { IPv4(213,217,128,0),18 }, + { IPv4(213,218,0,0),19 }, + { IPv4(213,219,0,0),18 }, + { IPv4(213,219,62,0),24 }, + { IPv4(213,219,63,0),24 }, + { IPv4(213,221,64,0),18 }, + { IPv4(213,221,128,0),19 }, + { IPv4(213,222,0,0),19 }, + { IPv4(213,222,64,0),18 }, + { IPv4(213,226,64,0),18 }, + { IPv4(213,227,0,0),18 }, + { IPv4(213,231,128,0),19 }, + { IPv4(213,232,64,0),23 }, + { IPv4(213,232,112,0),24 }, + { IPv4(213,233,64,0),24 }, + { IPv4(213,233,65,0),24 }, + { IPv4(213,233,66,0),24 }, + { IPv4(213,233,68,0),24 }, + { IPv4(213,233,69,0),24 }, + { IPv4(213,233,71,0),24 }, + { IPv4(213,233,72,0),24 }, + { IPv4(213,233,73,0),24 }, + { IPv4(213,233,74,0),24 }, + { IPv4(213,233,75,0),24 }, + { IPv4(213,233,76,0),24 }, + { IPv4(213,233,77,0),24 }, + { IPv4(213,233,78,0),24 }, + { IPv4(213,233,79,0),24 }, + { IPv4(213,233,80,0),24 }, + { IPv4(213,233,81,0),24 }, + { IPv4(213,233,82,0),24 }, + { IPv4(213,233,83,0),24 }, + { IPv4(213,233,84,0),24 }, + { IPv4(213,233,85,0),24 }, + { IPv4(213,233,86,0),24 }, + { IPv4(213,233,96,0),24 }, + { IPv4(213,233,98,0),24 }, + { IPv4(213,233,99,0),24 }, + { IPv4(213,233,100,0),24 }, + { IPv4(213,233,101,0),24 }, + { IPv4(213,233,102,0),24 }, + { IPv4(213,233,103,0),24 }, + { IPv4(213,233,104,0),24 }, + { IPv4(213,233,105,0),24 }, + { IPv4(213,233,106,0),24 }, + { IPv4(213,233,107,0),24 }, + { IPv4(213,233,108,0),24 }, + { IPv4(213,233,109,0),24 }, + { IPv4(213,233,111,0),24 }, + { IPv4(213,233,122,0),24 }, + { IPv4(213,233,123,0),24 }, + { IPv4(213,233,124,0),24 }, + { IPv4(213,233,125,0),24 }, + { IPv4(213,233,126,0),24 }, + { IPv4(213,233,127,0),24 }, + { IPv4(213,236,0,0),19 }, + { IPv4(213,236,64,0),18 }, + { IPv4(213,239,42,0),24 }, + { IPv4(213,239,56,0),22 }, + { IPv4(213,239,60,0),24 }, + { IPv4(213,239,128,0),18 }, + { IPv4(213,243,0,0),19 }, + { IPv4(213,243,160,0),19 }, + { IPv4(213,244,124,0),22 }, + { IPv4(213,246,128,0),18 }, + { IPv4(213,249,0,0),18 }, + { IPv4(213,249,128,0),18 }, + { IPv4(213,251,64,0),18 }, + { IPv4(213,253,0,0),18 }, + { IPv4(213,253,128,0),18 }, + { IPv4(213,254,64,0),18 }, + { IPv4(213,254,160,0),19 }, + { IPv4(213,255,0,0),19 }, + { IPv4(213,255,0,0),18 }, + { IPv4(213,255,64,0),18 }, + { IPv4(214,0,0,0),8 }, + { IPv4(214,1,70,0),24 }, + { IPv4(214,3,0,0),24 }, + { IPv4(214,3,50,0),24 }, + { IPv4(214,3,153,0),24 }, + { IPv4(214,3,154,0),24 }, + { IPv4(215,0,0,0),9 }, + { IPv4(215,1,1,0),24 }, + { IPv4(215,1,2,0),24 }, + { IPv4(215,1,3,0),24 }, + { IPv4(215,1,4,0),24 }, + { IPv4(215,1,8,0),24 }, + { IPv4(215,1,9,0),24 }, + { IPv4(215,1,11,0),24 }, + { IPv4(215,1,12,0),24 }, + { IPv4(215,1,13,0),24 }, + { IPv4(215,1,14,0),24 }, + { IPv4(215,1,15,0),24 }, + { IPv4(215,1,33,0),24 }, + { IPv4(216,0,1,0),24 }, + { IPv4(216,1,56,0),21 }, + { IPv4(216,1,196,0),22 }, + { IPv4(216,2,32,0),21 }, + { IPv4(216,2,40,0),23 }, + { IPv4(216,3,78,0),24 }, + { IPv4(216,5,16,0),20 }, + { IPv4(216,5,92,0),23 }, + { IPv4(216,6,8,0),22 }, + { IPv4(216,7,0,0),19 }, + { IPv4(216,7,31,0),24 }, + { IPv4(216,7,32,0),21 }, + { IPv4(216,7,40,0),22 }, + { IPv4(216,7,44,0),22 
}, + { IPv4(216,8,0,0),18 }, + { IPv4(216,8,20,0),22 }, + { IPv4(216,8,24,0),22 }, + { IPv4(216,8,32,0),21 }, + { IPv4(216,8,64,0),19 }, + { IPv4(216,8,70,0),23 }, + { IPv4(216,8,72,0),23 }, + { IPv4(216,9,160,0),24 }, + { IPv4(216,9,161,0),24 }, + { IPv4(216,9,162,0),24 }, + { IPv4(216,9,163,0),24 }, + { IPv4(216,9,164,0),24 }, + { IPv4(216,9,165,0),24 }, + { IPv4(216,9,166,0),24 }, + { IPv4(216,9,167,0),24 }, + { IPv4(216,9,168,0),24 }, + { IPv4(216,9,169,0),24 }, + { IPv4(216,9,170,0),24 }, + { IPv4(216,9,172,0),24 }, + { IPv4(216,9,173,0),24 }, + { IPv4(216,9,174,0),24 }, + { IPv4(216,9,175,0),24 }, + { IPv4(216,10,192,0),20 }, + { IPv4(216,10,208,0),20 }, + { IPv4(216,12,128,0),20 }, + { IPv4(216,13,0,0),16 }, + { IPv4(216,13,18,0),24 }, + { IPv4(216,14,10,0),24 }, + { IPv4(216,14,62,0),23 }, + { IPv4(216,15,0,0),17 }, + { IPv4(216,15,128,0),19 }, + { IPv4(216,15,160,0),19 }, + { IPv4(216,15,192,0),19 }, + { IPv4(216,15,224,0),19 }, + { IPv4(216,17,0,0),19 }, + { IPv4(216,17,32,0),19 }, + { IPv4(216,17,64,0),19 }, + { IPv4(216,17,76,0),24 }, + { IPv4(216,19,111,0),24 }, + { IPv4(216,20,160,0),19 }, + { IPv4(216,21,0,0),20 }, + { IPv4(216,21,160,0),24 }, + { IPv4(216,21,163,0),24 }, + { IPv4(216,21,165,0),24 }, + { IPv4(216,21,166,0),24 }, + { IPv4(216,21,168,0),24 }, + { IPv4(216,21,170,0),24 }, + { IPv4(216,21,171,0),24 }, + { IPv4(216,21,172,0),24 }, + { IPv4(216,21,173,0),24 }, + { IPv4(216,21,174,0),24 }, + { IPv4(216,21,175,0),24 }, + { IPv4(216,21,196,0),24 }, + { IPv4(216,21,201,0),24 }, + { IPv4(216,21,202,0),24 }, + { IPv4(216,21,206,0),23 }, + { IPv4(216,21,224,0),20 }, + { IPv4(216,21,224,0),22 }, + { IPv4(216,21,228,0),22 }, + { IPv4(216,21,232,0),24 }, + { IPv4(216,21,233,0),24 }, + { IPv4(216,21,235,0),24 }, + { IPv4(216,21,238,0),24 }, + { IPv4(216,21,239,0),24 }, + { IPv4(216,22,0,0),19 }, + { IPv4(216,22,32,0),20 }, + { IPv4(216,22,128,0),17 }, + { IPv4(216,23,128,0),19 }, + { IPv4(216,23,205,0),24 }, + { IPv4(216,23,224,0),21 }, + { IPv4(216,24,128,0),19 }, + { IPv4(216,25,0,0),17 }, + { IPv4(216,25,128,0),19 }, + { IPv4(216,25,192,0),21 }, + { IPv4(216,25,200,0),22 }, + { IPv4(216,25,204,0),24 }, + { IPv4(216,25,205,0),24 }, + { IPv4(216,25,206,0),24 }, + { IPv4(216,25,207,0),24 }, + { IPv4(216,26,80,0),20 }, + { IPv4(216,26,128,0),18 }, + { IPv4(216,26,152,0),21 }, + { IPv4(216,27,128,0),19 }, + { IPv4(216,27,176,0),20 }, + { IPv4(216,28,0,0),15 }, + { IPv4(216,28,0,0),21 }, + { IPv4(216,28,34,0),24 }, + { IPv4(216,28,38,0),24 }, + { IPv4(216,28,39,0),24 }, + { IPv4(216,28,46,0),24 }, + { IPv4(216,28,48,0),20 }, + { IPv4(216,28,64,0),22 }, + { IPv4(216,28,69,0),24 }, + { IPv4(216,28,70,0),23 }, + { IPv4(216,28,80,0),20 }, + { IPv4(216,28,98,0),24 }, + { IPv4(216,28,104,0),24 }, + { IPv4(216,28,106,0),24 }, + { IPv4(216,28,107,0),24 }, + { IPv4(216,28,109,0),24 }, + { IPv4(216,28,120,0),24 }, + { IPv4(216,28,131,0),24 }, + { IPv4(216,28,139,0),24 }, + { IPv4(216,28,151,0),24 }, + { IPv4(216,28,152,0),24 }, + { IPv4(216,28,184,0),24 }, + { IPv4(216,28,192,0),22 }, + { IPv4(216,28,223,0),24 }, + { IPv4(216,28,244,0),24 }, + { IPv4(216,28,249,0),24 }, + { IPv4(216,28,251,0),24 }, + { IPv4(216,29,2,0),24 }, + { IPv4(216,29,16,0),23 }, + { IPv4(216,29,52,0),23 }, + { IPv4(216,29,71,0),24 }, + { IPv4(216,29,78,0),23 }, + { IPv4(216,29,80,0),24 }, + { IPv4(216,29,88,0),24 }, + { IPv4(216,29,110,0),23 }, + { IPv4(216,29,153,0),24 }, + { IPv4(216,29,162,0),24 }, + { IPv4(216,29,164,0),24 }, + { IPv4(216,29,165,0),24 }, + { IPv4(216,29,166,0),24 }, + { IPv4(216,29,171,0),24 
}, + { IPv4(216,29,184,0),24 }, + { IPv4(216,29,185,0),24 }, + { IPv4(216,29,216,0),24 }, + { IPv4(216,29,217,0),24 }, + { IPv4(216,29,240,0),24 }, + { IPv4(216,30,0,0),17 }, + { IPv4(216,30,128,0),20 }, + { IPv4(216,31,128,0),18 }, + { IPv4(216,32,10,0),23 }, + { IPv4(216,32,114,0),24 }, + { IPv4(216,32,120,0),24 }, + { IPv4(216,32,180,0),22 }, + { IPv4(216,32,240,0),22 }, + { IPv4(216,32,252,0),23 }, + { IPv4(216,33,9,0),24 }, + { IPv4(216,33,16,0),22 }, + { IPv4(216,33,60,0),23 }, + { IPv4(216,33,86,0),24 }, + { IPv4(216,33,148,0),22 }, + { IPv4(216,33,151,0),24 }, + { IPv4(216,33,156,0),23 }, + { IPv4(216,33,171,0),24 }, + { IPv4(216,33,236,0),22 }, + { IPv4(216,33,240,0),22 }, + { IPv4(216,33,244,0),22 }, + { IPv4(216,34,60,0),22 }, + { IPv4(216,34,60,0),23 }, + { IPv4(216,34,72,0),22 }, + { IPv4(216,35,59,0),24 }, + { IPv4(216,36,64,0),20 }, + { IPv4(216,36,128,0),20 }, + { IPv4(216,36,144,0),20 }, + { IPv4(216,36,160,0),20 }, + { IPv4(216,36,176,0),20 }, + { IPv4(216,37,96,0),22 }, + { IPv4(216,37,100,0),22 }, + { IPv4(216,37,110,0),23 }, + { IPv4(216,37,128,0),19 }, + { IPv4(216,37,160,0),19 }, + { IPv4(216,37,192,0),19 }, + { IPv4(216,37,224,0),19 }, + { IPv4(216,38,0,0),19 }, + { IPv4(216,38,32,0),20 }, + { IPv4(216,38,64,0),20 }, + { IPv4(216,38,96,0),20 }, + { IPv4(216,38,192,0),19 }, + { IPv4(216,38,224,0),23 }, + { IPv4(216,39,0,0),19 }, + { IPv4(216,39,224,0),20 }, + { IPv4(216,39,240,0),20 }, + { IPv4(216,39,240,0),24 }, + { IPv4(216,40,48,0),22 }, + { IPv4(216,40,52,0),22 }, + { IPv4(216,41,0,0),17 }, + { IPv4(216,41,0,0),20 }, + { IPv4(216,41,85,0),24 }, + { IPv4(216,41,106,0),24 }, + { IPv4(216,41,107,0),24 }, + { IPv4(216,42,0,0),16 }, + { IPv4(216,44,0,0),16 }, + { IPv4(216,46,96,0),19 }, + { IPv4(216,46,160,0),19 }, + { IPv4(216,47,0,0),20 }, + { IPv4(216,47,64,0),19 }, + { IPv4(216,47,106,0),24 }, + { IPv4(216,47,128,0),19 }, + { IPv4(216,47,160,0),19 }, + { IPv4(216,49,32,0),19 }, + { IPv4(216,49,80,0),20 }, + { IPv4(216,49,80,0),21 }, + { IPv4(216,49,88,0),21 }, + { IPv4(216,49,202,0),23 }, + { IPv4(216,49,204,0),23 }, + { IPv4(216,49,224,0),20 }, + { IPv4(216,50,0,0),16 }, + { IPv4(216,51,0,0),17 }, + { IPv4(216,51,128,0),18 }, + { IPv4(216,51,192,0),19 }, + { IPv4(216,52,0,0),19 }, + { IPv4(216,52,10,0),24 }, + { IPv4(216,52,11,0),24 }, + { IPv4(216,52,14,0),24 }, + { IPv4(216,52,15,0),24 }, + { IPv4(216,52,18,0),24 }, + { IPv4(216,52,23,0),24 }, + { IPv4(216,52,25,0),24 }, + { IPv4(216,52,28,0),24 }, + { IPv4(216,52,32,0),20 }, + { IPv4(216,52,45,0),24 }, + { IPv4(216,52,48,0),20 }, + { IPv4(216,52,50,0),24 }, + { IPv4(216,52,51,0),24 }, + { IPv4(216,52,54,0),24 }, + { IPv4(216,52,57,0),24 }, + { IPv4(216,52,69,0),24 }, + { IPv4(216,52,74,0),23 }, + { IPv4(216,52,80,0),21 }, + { IPv4(216,52,83,0),24 }, + { IPv4(216,52,84,0),24 }, + { IPv4(216,52,87,0),24 }, + { IPv4(216,52,96,0),20 }, + { IPv4(216,52,101,0),24 }, + { IPv4(216,52,105,0),24 }, + { IPv4(216,52,106,0),24 }, + { IPv4(216,52,107,0),24 }, + { IPv4(216,52,112,0),20 }, + { IPv4(216,52,116,0),24 }, + { IPv4(216,52,117,0),24 }, + { IPv4(216,52,123,0),24 }, + { IPv4(216,52,128,0),19 }, + { IPv4(216,52,133,0),24 }, + { IPv4(216,52,134,0),24 }, + { IPv4(216,52,136,0),24 }, + { IPv4(216,52,139,0),24 }, + { IPv4(216,52,146,0),23 }, + { IPv4(216,52,149,0),24 }, + { IPv4(216,52,152,0),24 }, + { IPv4(216,52,160,0),21 }, + { IPv4(216,52,168,0),21 }, + { IPv4(216,52,174,0),24 }, + { IPv4(216,52,176,0),20 }, + { IPv4(216,52,183,0),24 }, + { IPv4(216,52,185,0),24 }, + { IPv4(216,52,188,0),24 }, + { 
IPv4(216,52,192,0),20 }, + { IPv4(216,52,204,0),24 }, + { IPv4(216,52,208,0),20 }, + { IPv4(216,52,216,0),24 }, + { IPv4(216,52,224,0),24 }, + { IPv4(216,52,224,0),19 }, + { IPv4(216,52,225,0),24 }, + { IPv4(216,52,229,0),24 }, + { IPv4(216,52,236,0),24 }, + { IPv4(216,52,238,0),24 }, + { IPv4(216,52,241,0),24 }, + { IPv4(216,52,247,0),24 }, + { IPv4(216,52,249,0),24 }, + { IPv4(216,52,251,0),24 }, + { IPv4(216,53,0,0),18 }, + { IPv4(216,53,0,0),17 }, + { IPv4(216,53,50,0),24 }, + { IPv4(216,53,64,0),19 }, + { IPv4(216,53,71,0),24 }, + { IPv4(216,53,96,0),24 }, + { IPv4(216,53,99,0),24 }, + { IPv4(216,53,100,0),24 }, + { IPv4(216,54,0,0),18 }, + { IPv4(216,54,64,0),19 }, + { IPv4(216,54,96,0),19 }, + { IPv4(216,54,154,0),24 }, + { IPv4(216,54,192,0),22 }, + { IPv4(216,54,234,0),24 }, + { IPv4(216,54,235,0),24 }, + { IPv4(216,54,236,0),23 }, + { IPv4(216,54,238,0),24 }, + { IPv4(216,55,0,0),18 }, + { IPv4(216,57,0,0),18 }, + { IPv4(216,57,6,0),23 }, + { IPv4(216,57,8,0),21 }, + { IPv4(216,57,12,0),23 }, + { IPv4(216,57,14,0),23 }, + { IPv4(216,57,20,0),22 }, + { IPv4(216,57,26,0),23 }, + { IPv4(216,57,128,0),19 }, + { IPv4(216,57,144,0),24 }, + { IPv4(216,57,146,0),23 }, + { IPv4(216,57,192,0),20 }, + { IPv4(216,57,208,0),20 }, + { IPv4(216,58,0,0),17 }, + { IPv4(216,58,64,0),18 }, + { IPv4(216,58,128,0),19 }, + { IPv4(216,58,160,0),20 }, + { IPv4(216,59,93,0),24 }, + { IPv4(216,59,184,0),21 }, + { IPv4(216,59,192,0),19 }, + { IPv4(216,61,0,0),19 }, + { IPv4(216,63,144,0),21 }, + { IPv4(216,63,152,0),21 }, + { IPv4(216,63,158,0),24 }, + { IPv4(216,64,128,0),21 }, + { IPv4(216,64,132,0),24 }, + { IPv4(216,64,134,0),23 }, + { IPv4(216,64,136,0),21 }, + { IPv4(216,64,136,0),23 }, + { IPv4(216,64,138,0),23 }, + { IPv4(216,64,140,0),23 }, + { IPv4(216,64,142,0),23 }, + { IPv4(216,64,144,0),21 }, + { IPv4(216,64,145,0),24 }, + { IPv4(216,64,146,0),24 }, + { IPv4(216,64,147,0),24 }, + { IPv4(216,64,149,0),24 }, + { IPv4(216,64,152,0),22 }, + { IPv4(216,64,153,0),24 }, + { IPv4(216,64,154,0),24 }, + { IPv4(216,64,155,0),24 }, + { IPv4(216,64,156,0),22 }, + { IPv4(216,64,158,0),24 }, + { IPv4(216,64,159,0),24 }, + { IPv4(216,64,160,0),21 }, + { IPv4(216,64,161,0),24 }, + { IPv4(216,64,176,0),21 }, + { IPv4(216,64,178,0),24 }, + { IPv4(216,64,179,0),24 }, + { IPv4(216,64,180,0),24 }, + { IPv4(216,64,181,0),24 }, + { IPv4(216,64,184,0),21 }, + { IPv4(216,65,0,0),17 }, + { IPv4(216,66,0,0),19 }, + { IPv4(216,66,32,0),22 }, + { IPv4(216,66,32,0),19 }, + { IPv4(216,66,64,0),19 }, + { IPv4(216,66,74,0),23 }, + { IPv4(216,66,96,0),20 }, + { IPv4(216,67,192,0),20 }, + { IPv4(216,67,208,0),20 }, + { IPv4(216,68,0,0),16 }, + { IPv4(216,68,76,0),24 }, + { IPv4(216,68,77,0),24 }, + { IPv4(216,68,78,0),24 }, + { IPv4(216,68,84,0),24 }, + { IPv4(216,68,136,0),22 }, + { IPv4(216,68,140,0),22 }, + { IPv4(216,68,160,0),22 }, + { IPv4(216,68,164,0),22 }, + { IPv4(216,68,168,0),21 }, + { IPv4(216,68,176,0),21 }, + { IPv4(216,68,184,0),22 }, + { IPv4(216,69,96,0),24 }, + { IPv4(216,70,128,0),18 }, + { IPv4(216,70,163,0),24 }, + { IPv4(216,70,171,0),24 }, + { IPv4(216,70,172,0),24 }, + { IPv4(216,70,190,0),24 }, + { IPv4(216,70,224,0),19 }, + { IPv4(216,71,43,0),24 }, + { IPv4(216,71,53,0),24 }, + { IPv4(216,71,54,0),23 }, + { IPv4(216,72,10,0),24 }, + { IPv4(216,72,16,0),24 }, + { IPv4(216,72,17,0),24 }, + { IPv4(216,72,18,0),24 }, + { IPv4(216,72,19,0),24 }, + { IPv4(216,72,20,0),24 }, + { IPv4(216,72,95,0),24 }, + { IPv4(216,73,0,0),18 }, + { IPv4(216,73,5,0),24 }, + { IPv4(216,73,10,0),24 }, + { IPv4(216,73,11,0),24 
}, + { IPv4(216,73,12,0),24 }, + { IPv4(216,73,13,0),24 }, + { IPv4(216,73,14,0),24 }, + { IPv4(216,73,15,0),24 }, + { IPv4(216,73,16,0),24 }, + { IPv4(216,73,17,0),24 }, + { IPv4(216,73,26,0),24 }, + { IPv4(216,73,96,0),20 }, + { IPv4(216,73,128,0),18 }, + { IPv4(216,73,176,0),20 }, + { IPv4(216,74,0,0),18 }, + { IPv4(216,75,64,0),19 }, + { IPv4(216,75,132,0),22 }, + { IPv4(216,75,136,0),24 }, + { IPv4(216,75,137,0),24 }, + { IPv4(216,79,170,0),24 }, + { IPv4(216,80,0,0),17 }, + { IPv4(216,80,38,0),24 }, + { IPv4(216,81,128,0),17 }, + { IPv4(216,82,80,0),24 }, + { IPv4(216,82,80,0),21 }, + { IPv4(216,82,104,0),22 }, + { IPv4(216,82,108,0),24 }, + { IPv4(216,82,113,0),24 }, + { IPv4(216,82,114,0),24 }, + { IPv4(216,82,114,0),23 }, + { IPv4(216,82,115,0),24 }, + { IPv4(216,82,116,0),23 }, + { IPv4(216,82,118,0),24 }, + { IPv4(216,82,123,0),24 }, + { IPv4(216,82,124,0),23 }, + { IPv4(216,82,126,0),24 }, + { IPv4(216,82,192,0),20 }, + { IPv4(216,82,224,0),24 }, + { IPv4(216,82,228,0),24 }, + { IPv4(216,82,229,0),24 }, + { IPv4(216,82,233,0),24 }, + { IPv4(216,82,236,0),24 }, + { IPv4(216,82,237,0),24 }, + { IPv4(216,82,238,0),24 }, + { IPv4(216,82,239,0),24 }, + { IPv4(216,83,0,0),19 }, + { IPv4(216,83,64,0),20 }, + { IPv4(216,83,160,0),19 }, + { IPv4(216,84,0,0),16 }, + { IPv4(216,84,78,0),24 }, + { IPv4(216,84,92,0),24 }, + { IPv4(216,84,93,0),24 }, + { IPv4(216,84,94,0),24 }, + { IPv4(216,84,218,0),24 }, + { IPv4(216,85,0,0),16 }, + { IPv4(216,85,126,0),24 }, + { IPv4(216,85,127,0),24 }, + { IPv4(216,85,228,0),24 }, + { IPv4(216,85,233,0),24 }, + { IPv4(216,85,234,0),23 }, + { IPv4(216,85,236,0),22 }, + { IPv4(216,85,240,0),20 }, + { IPv4(216,86,32,0),19 }, + { IPv4(216,86,64,0),19 }, + { IPv4(216,86,96,0),19 }, + { IPv4(216,86,128,0),20 }, + { IPv4(216,86,160,0),20 }, + { IPv4(216,86,224,0),20 }, + { IPv4(216,86,240,0),20 }, + { IPv4(216,87,64,0),19 }, + { IPv4(216,87,128,0),19 }, + { IPv4(216,87,192,0),20 }, + { IPv4(216,87,208,0),20 }, + { IPv4(216,88,200,0),21 }, + { IPv4(216,88,211,0),24 }, + { IPv4(216,89,68,0),22 }, + { IPv4(216,89,72,0),21 }, + { IPv4(216,89,80,0),21 }, + { IPv4(216,89,238,0),23 }, + { IPv4(216,89,244,0),23 }, + { IPv4(216,90,0,0),23 }, + { IPv4(216,90,40,0),22 }, + { IPv4(216,90,72,0),22 }, + { IPv4(216,90,228,0),22 }, + { IPv4(216,91,114,0),23 }, + { IPv4(216,91,116,0),22 }, + { IPv4(216,91,130,0),24 }, + { IPv4(216,92,0,0),16 }, + { IPv4(216,94,86,0),24 }, + { IPv4(216,94,112,0),24 }, + { IPv4(216,94,168,0),24 }, + { IPv4(216,94,179,0),24 }, + { IPv4(216,94,180,0),24 }, + { IPv4(216,96,128,0),18 }, + { IPv4(216,97,128,0),19 }, + { IPv4(216,98,32,0),20 }, + { IPv4(216,98,128,0),19 }, + { IPv4(216,98,160,0),20 }, + { IPv4(216,98,176,0),24 }, + { IPv4(216,98,177,0),24 }, + { IPv4(216,98,178,0),24 }, + { IPv4(216,98,179,0),24 }, + { IPv4(216,98,180,0),24 }, + { IPv4(216,98,181,0),24 }, + { IPv4(216,98,182,0),24 }, + { IPv4(216,98,183,0),24 }, + { IPv4(216,98,184,0),24 }, + { IPv4(216,98,185,0),24 }, + { IPv4(216,98,186,0),24 }, + { IPv4(216,98,187,0),24 }, + { IPv4(216,98,188,0),24 }, + { IPv4(216,98,189,0),24 }, + { IPv4(216,98,190,0),24 }, + { IPv4(216,98,191,0),24 }, + { IPv4(216,98,192,0),20 }, + { IPv4(216,99,32,0),19 }, + { IPv4(216,99,103,0),24 }, + { IPv4(216,99,128,0),21 }, + { IPv4(216,99,136,0),22 }, + { IPv4(216,99,224,0),19 }, + { IPv4(216,100,88,0),21 }, + { IPv4(216,100,96,0),19 }, + { IPv4(216,101,0,0),20 }, + { IPv4(216,101,95,0),24 }, + { IPv4(216,101,143,0),24 }, + { IPv4(216,101,254,0),24 }, + { IPv4(216,102,102,0),24 }, + { 
IPv4(216,102,182,0),24 }, + { IPv4(216,102,255,0),24 }, + { IPv4(216,103,160,0),20 }, + { IPv4(216,104,48,0),21 }, + { IPv4(216,104,96,0),19 }, + { IPv4(216,104,160,0),19 }, + { IPv4(216,105,0,0),19 }, + { IPv4(216,105,128,0),19 }, + { IPv4(216,105,160,0),20 }, + { IPv4(216,105,192,0),20 }, + { IPv4(216,106,192,0),22 }, + { IPv4(216,107,0,0),18 }, + { IPv4(216,108,192,0),20 }, + { IPv4(216,109,104,0),24 }, + { IPv4(216,109,128,0),19 }, + { IPv4(216,109,224,0),20 }, + { IPv4(216,109,240,0),20 }, + { IPv4(216,110,32,0),20 }, + { IPv4(216,110,128,0),18 }, + { IPv4(216,111,74,0),24 }, + { IPv4(216,111,138,0),24 }, + { IPv4(216,111,144,0),23 }, + { IPv4(216,111,166,0),23 }, + { IPv4(216,111,243,0),24 }, + { IPv4(216,112,0,0),16 }, + { IPv4(216,112,17,0),24 }, + { IPv4(216,112,28,0),24 }, + { IPv4(216,112,40,0),21 }, + { IPv4(216,112,52,0),24 }, + { IPv4(216,112,54,0),24 }, + { IPv4(216,112,56,0),24 }, + { IPv4(216,112,116,0),24 }, + { IPv4(216,112,126,0),23 }, + { IPv4(216,112,132,0),22 }, + { IPv4(216,112,152,0),24 }, + { IPv4(216,112,176,0),22 }, + { IPv4(216,112,188,0),22 }, + { IPv4(216,112,194,0),23 }, + { IPv4(216,112,196,0),23 }, + { IPv4(216,112,199,0),24 }, + { IPv4(216,112,240,0),22 }, + { IPv4(216,113,64,0),22 }, + { IPv4(216,113,128,0),19 }, + { IPv4(216,113,192,0),20 }, + { IPv4(216,114,128,0),18 }, + { IPv4(216,115,48,0),20 }, + { IPv4(216,115,128,0),19 }, + { IPv4(216,115,224,0),19 }, + { IPv4(216,116,64,0),20 }, + { IPv4(216,116,96,0),19 }, + { IPv4(216,116,160,0),19 }, + { IPv4(216,116,191,0),24 }, + { IPv4(216,117,76,0),24 }, + { IPv4(216,117,98,0),23 }, + { IPv4(216,117,128,0),18 }, + { IPv4(216,118,64,0),18 }, + { IPv4(216,118,118,0),24 }, + { IPv4(216,118,196,0),22 }, + { IPv4(216,119,96,0),19 }, + { IPv4(216,119,192,0),20 }, + { IPv4(216,120,0,0),17 }, + { IPv4(216,120,4,0),23 }, + { IPv4(216,120,16,0),23 }, + { IPv4(216,120,128,0),20 }, + { IPv4(216,120,144,0),20 }, + { IPv4(216,120,160,0),21 }, + { IPv4(216,120,168,0),24 }, + { IPv4(216,120,169,0),24 }, + { IPv4(216,120,170,0),24 }, + { IPv4(216,120,171,0),24 }, + { IPv4(216,120,172,0),22 }, + { IPv4(216,120,176,0),20 }, + { IPv4(216,120,192,0),21 }, + { IPv4(216,120,204,0),22 }, + { IPv4(216,121,224,0),19 }, + { IPv4(216,123,0,0),18 }, + { IPv4(216,123,0,0),19 }, + { IPv4(216,123,0,0),17 }, + { IPv4(216,123,8,0),22 }, + { IPv4(216,123,20,0),24 }, + { IPv4(216,123,31,0),24 }, + { IPv4(216,123,32,0),20 }, + { IPv4(216,123,40,0),24 }, + { IPv4(216,123,48,0),21 }, + { IPv4(216,123,56,0),24 }, + { IPv4(216,123,56,0),21 }, + { IPv4(216,123,57,0),24 }, + { IPv4(216,123,64,0),19 }, + { IPv4(216,123,80,0),20 }, + { IPv4(216,123,85,0),24 }, + { IPv4(216,123,101,0),24 }, + { IPv4(216,123,102,0),24 }, + { IPv4(216,123,104,0),24 }, + { IPv4(216,123,105,0),24 }, + { IPv4(216,123,107,0),24 }, + { IPv4(216,123,108,0),24 }, + { IPv4(216,123,118,0),24 }, + { IPv4(216,123,119,0),24 }, + { IPv4(216,123,120,0),24 }, + { IPv4(216,123,121,0),24 }, + { IPv4(216,123,122,0),24 }, + { IPv4(216,123,128,0),18 }, + { IPv4(216,124,0,0),16 }, + { IPv4(216,124,160,0),20 }, + { IPv4(216,124,208,0),21 }, + { IPv4(216,125,0,0),16 }, + { IPv4(216,125,56,0),21 }, + { IPv4(216,126,0,0),19 }, + { IPv4(216,127,0,0),19 }, + { IPv4(216,127,128,0),22 }, + { IPv4(216,127,128,0),19 }, + { IPv4(216,127,224,0),22 }, + { IPv4(216,129,0,0),18 }, + { IPv4(216,129,1,0),24 }, + { IPv4(216,129,33,0),24 }, + { IPv4(216,129,56,0),23 }, + { IPv4(216,129,64,0),19 }, + { IPv4(216,129,192,0),19 }, + { IPv4(216,130,16,0),20 }, + { IPv4(216,130,128,0),19 }, + { 
IPv4(216,131,64,0),19 }, + { IPv4(216,131,80,0),20 }, + { IPv4(216,132,36,0),24 }, + { IPv4(216,132,36,0),23 }, + { IPv4(216,132,96,0),22 }, + { IPv4(216,133,17,0),24 }, + { IPv4(216,135,128,0),17 }, + { IPv4(216,135,144,0),22 }, + { IPv4(216,135,152,0),21 }, + { IPv4(216,135,160,0),20 }, + { IPv4(216,135,196,0),22 }, + { IPv4(216,136,8,0),24 }, + { IPv4(216,136,57,0),24 }, + { IPv4(216,136,85,0),24 }, + { IPv4(216,136,96,0),19 }, + { IPv4(216,136,116,0),23 }, + { IPv4(216,136,154,0),23 }, + { IPv4(216,136,192,0),21 }, + { IPv4(216,136,194,0),23 }, + { IPv4(216,136,199,0),24 }, + { IPv4(216,137,0,0),19 }, + { IPv4(216,137,8,0),21 }, + { IPv4(216,137,16,0),21 }, + { IPv4(216,137,36,0),22 }, + { IPv4(216,137,144,0),20 }, + { IPv4(216,137,192,0),18 }, + { IPv4(216,138,192,0),19 }, + { IPv4(216,138,224,0),19 }, + { IPv4(216,139,96,0),20 }, + { IPv4(216,139,128,0),19 }, + { IPv4(216,140,0,0),14 }, + { IPv4(216,140,57,0),24 }, + { IPv4(216,140,58,0),23 }, + { IPv4(216,140,128,0),18 }, + { IPv4(216,140,178,0),23 }, + { IPv4(216,140,180,0),24 }, + { IPv4(216,140,203,0),24 }, + { IPv4(216,141,0,0),18 }, + { IPv4(216,141,0,0),19 }, + { IPv4(216,141,24,0),23 }, + { IPv4(216,141,60,0),23 }, + { IPv4(216,141,64,0),20 }, + { IPv4(216,141,82,0),23 }, + { IPv4(216,141,86,0),23 }, + { IPv4(216,141,88,0),23 }, + { IPv4(216,141,234,0),23 }, + { IPv4(216,142,16,0),20 }, + { IPv4(216,142,32,0),22 }, + { IPv4(216,142,36,0),22 }, + { IPv4(216,142,48,0),21 }, + { IPv4(216,142,56,0),22 }, + { IPv4(216,142,92,0),24 }, + { IPv4(216,142,133,0),25 }, + { IPv4(216,142,137,0),24 }, + { IPv4(216,142,156,0),22 }, + { IPv4(216,142,172,0),22 }, + { IPv4(216,142,176,0),22 }, + { IPv4(216,142,188,0),23 }, + { IPv4(216,142,200,0),21 }, + { IPv4(216,142,208,0),20 }, + { IPv4(216,142,224,0),22 }, + { IPv4(216,142,228,0),22 }, + { IPv4(216,142,236,0),22 }, + { IPv4(216,142,240,0),21 }, + { IPv4(216,142,244,0),22 }, + { IPv4(216,142,248,0),22 }, + { IPv4(216,142,248,0),21 }, + { IPv4(216,142,252,0),22 }, + { IPv4(216,143,0,0),22 }, + { IPv4(216,143,4,0),22 }, + { IPv4(216,143,8,0),21 }, + { IPv4(216,143,16,0),22 }, + { IPv4(216,143,24,0),22 }, + { IPv4(216,143,76,0),22 }, + { IPv4(216,143,90,0),24 }, + { IPv4(216,143,120,0),22 }, + { IPv4(216,143,134,0),24 }, + { IPv4(216,143,138,0),23 }, + { IPv4(216,143,140,0),22 }, + { IPv4(216,143,160,0),21 }, + { IPv4(216,143,172,0),22 }, + { IPv4(216,143,224,0),22 }, + { IPv4(216,143,238,0),23 }, + { IPv4(216,143,240,0),23 }, + { IPv4(216,143,244,0),22 }, + { IPv4(216,144,128,0),19 }, + { IPv4(216,144,160,0),19 }, + { IPv4(216,145,28,0),24 }, + { IPv4(216,145,32,0),20 }, + { IPv4(216,145,130,0),24 }, + { IPv4(216,145,131,0),24 }, + { IPv4(216,145,132,0),24 }, + { IPv4(216,145,133,0),24 }, + { IPv4(216,145,134,0),24 }, + { IPv4(216,145,135,0),24 }, + { IPv4(216,146,36,0),24 }, + { IPv4(216,146,37,0),24 }, + { IPv4(216,146,38,0),24 }, + { IPv4(216,146,39,0),24 }, + { IPv4(216,146,40,0),24 }, + { IPv4(216,146,41,0),24 }, + { IPv4(216,146,42,0),24 }, + { IPv4(216,146,43,0),24 }, + { IPv4(216,146,44,0),24 }, + { IPv4(216,146,45,0),24 }, + { IPv4(216,146,46,0),23 }, + { IPv4(216,146,131,0),24 }, + { IPv4(216,146,132,0),24 }, + { IPv4(216,146,134,0),24 }, + { IPv4(216,146,140,0),24 }, + { IPv4(216,146,141,0),24 }, + { IPv4(216,146,142,0),24 }, + { IPv4(216,146,143,0),24 }, + { IPv4(216,146,150,0),24 }, + { IPv4(216,146,176,0),24 }, + { IPv4(216,146,179,0),24 }, + { IPv4(216,146,192,0),19 }, + { IPv4(216,147,136,0),24 }, + { IPv4(216,147,137,0),24 }, + { IPv4(216,147,141,0),24 }, + { 
IPv4(216,148,0,0),16 }, + { IPv4(216,148,4,0),24 }, + { IPv4(216,148,5,0),24 }, + { IPv4(216,148,6,0),24 }, + { IPv4(216,148,7,0),24 }, + { IPv4(216,148,40,0),24 }, + { IPv4(216,148,47,0),24 }, + { IPv4(216,148,50,0),24 }, + { IPv4(216,148,78,0),24 }, + { IPv4(216,148,79,0),24 }, + { IPv4(216,148,88,0),23 }, + { IPv4(216,148,90,0),24 }, + { IPv4(216,148,91,0),24 }, + { IPv4(216,148,92,0),24 }, + { IPv4(216,148,93,0),24 }, + { IPv4(216,148,94,0),24 }, + { IPv4(216,148,95,0),24 }, + { IPv4(216,148,101,0),24 }, + { IPv4(216,148,104,0),23 }, + { IPv4(216,148,106,0),24 }, + { IPv4(216,148,128,0),24 }, + { IPv4(216,148,130,0),24 }, + { IPv4(216,148,164,0),23 }, + { IPv4(216,148,208,0),20 }, + { IPv4(216,148,224,0),22 }, + { IPv4(216,148,224,0),19 }, + { IPv4(216,149,0,0),16 }, + { IPv4(216,149,0,0),19 }, + { IPv4(216,149,32,0),19 }, + { IPv4(216,149,64,0),19 }, + { IPv4(216,149,96,0),19 }, + { IPv4(216,149,128,0),19 }, + { IPv4(216,149,160,0),19 }, + { IPv4(216,149,192,0),19 }, + { IPv4(216,150,0,0),19 }, + { IPv4(216,150,96,0),20 }, + { IPv4(216,150,128,0),19 }, + { IPv4(216,150,192,0),19 }, + { IPv4(216,151,0,0),19 }, + { IPv4(216,151,18,0),23 }, + { IPv4(216,151,64,0),18 }, + { IPv4(216,151,82,0),23 }, + { IPv4(216,151,84,0),24 }, + { IPv4(216,151,85,0),24 }, + { IPv4(216,151,128,0),18 }, + { IPv4(216,151,192,0),19 }, + { IPv4(216,152,0,0),18 }, + { IPv4(216,152,64,0),20 }, + { IPv4(216,153,0,0),17 }, + { IPv4(216,156,0,0),16 }, + { IPv4(216,157,40,0),21 }, + { IPv4(216,157,48,0),21 }, + { IPv4(216,157,64,0),21 }, + { IPv4(216,157,72,0),21 }, + { IPv4(216,157,88,0),21 }, + { IPv4(216,157,96,0),21 }, + { IPv4(216,157,104,0),21 }, + { IPv4(216,158,0,0),18 }, + { IPv4(216,158,64,0),24 }, + { IPv4(216,158,65,0),24 }, + { IPv4(216,158,66,0),24 }, + { IPv4(216,158,72,0),24 }, + { IPv4(216,158,74,0),24 }, + { IPv4(216,158,75,0),24 }, + { IPv4(216,158,78,0),24 }, + { IPv4(216,158,80,0),24 }, + { IPv4(216,158,81,0),24 }, + { IPv4(216,158,82,0),24 }, + { IPv4(216,158,84,0),24 }, + { IPv4(216,158,89,0),24 }, + { IPv4(216,158,90,0),24 }, + { IPv4(216,158,91,0),24 }, + { IPv4(216,158,92,0),24 }, + { IPv4(216,158,128,0),19 }, + { IPv4(216,159,0,0),17 }, + { IPv4(216,159,128,0),18 }, + { IPv4(216,159,130,0),23 }, + { IPv4(216,159,132,0),23 }, + { IPv4(216,160,0,0),15 }, + { IPv4(216,160,229,0),24 }, + { IPv4(216,161,196,0),22 }, + { IPv4(216,162,32,0),21 }, + { IPv4(216,162,40,0),22 }, + { IPv4(216,162,44,0),23 }, + { IPv4(216,162,46,0),24 }, + { IPv4(216,162,47,0),24 }, + { IPv4(216,162,96,0),19 }, + { IPv4(216,162,128,0),20 }, + { IPv4(216,163,32,0),20 }, + { IPv4(216,163,48,0),20 }, + { IPv4(216,163,64,0),19 }, + { IPv4(216,163,96,0),19 }, + { IPv4(216,163,102,0),24 }, + { IPv4(216,163,103,0),24 }, + { IPv4(216,163,112,0),24 }, + { IPv4(216,163,113,0),24 }, + { IPv4(216,163,114,0),24 }, + { IPv4(216,163,117,0),24 }, + { IPv4(216,163,120,0),23 }, + { IPv4(216,163,122,0),24 }, + { IPv4(216,163,123,0),24 }, + { IPv4(216,163,124,0),24 }, + { IPv4(216,163,125,0),24 }, + { IPv4(216,163,126,0),24 }, + { IPv4(216,163,160,0),20 }, + { IPv4(216,163,176,0),21 }, + { IPv4(216,163,176,0),20 }, + { IPv4(216,163,184,0),21 }, + { IPv4(216,163,192,0),20 }, + { IPv4(216,163,205,0),24 }, + { IPv4(216,163,208,0),20 }, + { IPv4(216,163,248,0),21 }, + { IPv4(216,164,0,0),16 }, + { IPv4(216,165,0,0),17 }, + { IPv4(216,165,192,0),19 }, + { IPv4(216,166,0,0),17 }, + { IPv4(216,166,128,0),18 }, + { IPv4(216,167,0,0),17 }, + { IPv4(216,167,156,0),23 }, + { IPv4(216,167,192,0),20 }, + { IPv4(216,168,136,0),24 }, + { 
IPv4(216,168,137,0),24 }, + { IPv4(216,168,160,0),24 }, + { IPv4(216,168,161,0),24 }, + { IPv4(216,168,162,0),23 }, + { IPv4(216,168,164,0),22 }, + { IPv4(216,168,168,0),22 }, + { IPv4(216,168,192,0),19 }, + { IPv4(216,168,224,0),19 }, + { IPv4(216,168,252,0),24 }, + { IPv4(216,168,253,0),24 }, + { IPv4(216,168,254,0),24 }, + { IPv4(216,169,32,0),19 }, + { IPv4(216,169,136,0),24 }, + { IPv4(216,169,144,0),20 }, + { IPv4(216,169,160,0),19 }, + { IPv4(216,169,242,0),23 }, + { IPv4(216,170,64,0),19 }, + { IPv4(216,170,188,0),22 }, + { IPv4(216,171,42,0),23 }, + { IPv4(216,171,64,0),20 }, + { IPv4(216,171,128,0),19 }, + { IPv4(216,171,141,0),24 }, + { IPv4(216,172,36,0),24 }, + { IPv4(216,172,76,0),24 }, + { IPv4(216,172,110,0),24 }, + { IPv4(216,172,111,0),24 }, + { IPv4(216,172,198,0),24 }, + { IPv4(216,172,199,0),24 }, + { IPv4(216,173,0,0),18 }, + { IPv4(216,173,16,0),23 }, + { IPv4(216,173,128,0),19 }, + { IPv4(216,173,136,0),24 }, + { IPv4(216,174,64,0),20 }, + { IPv4(216,174,80,0),21 }, + { IPv4(216,174,88,0),22 }, + { IPv4(216,174,93,0),24 }, + { IPv4(216,174,94,0),23 }, + { IPv4(216,174,96,0),20 }, + { IPv4(216,174,116,0),22 }, + { IPv4(216,174,120,0),21 }, + { IPv4(216,174,160,0),22 }, + { IPv4(216,174,164,0),22 }, + { IPv4(216,174,168,0),23 }, + { IPv4(216,174,192,0),18 }, + { IPv4(216,174,228,0),22 }, + { IPv4(216,175,40,0),21 }, + { IPv4(216,175,41,0),24 }, + { IPv4(216,175,48,0),20 }, + { IPv4(216,175,56,0),22 }, + { IPv4(216,175,60,0),23 }, + { IPv4(216,175,62,0),24 }, + { IPv4(216,175,63,0),24 }, + { IPv4(216,175,80,0),20 }, + { IPv4(216,175,96,0),19 }, + { IPv4(216,176,160,0),20 }, + { IPv4(216,176,168,0),24 }, + { IPv4(216,176,169,0),24 }, + { IPv4(216,176,170,0),24 }, + { IPv4(216,176,171,0),24 }, + { IPv4(216,176,224,0),22 }, + { IPv4(216,176,224,0),20 }, + { IPv4(216,176,232,0),22 }, + { IPv4(216,176,236,0),23 }, + { IPv4(216,176,239,0),24 }, + { IPv4(216,177,0,0),19 }, + { IPv4(216,177,160,0),19 }, + { IPv4(216,178,0,0),19 }, + { IPv4(216,178,0,0),24 }, + { IPv4(216,178,5,0),24 }, + { IPv4(216,178,64,0),19 }, + { IPv4(216,178,98,0),23 }, + { IPv4(216,178,100,0),23 }, + { IPv4(216,178,105,0),24 }, + { IPv4(216,178,106,0),23 }, + { IPv4(216,178,108,0),24 }, + { IPv4(216,178,113,0),24 }, + { IPv4(216,178,140,0),22 }, + { IPv4(216,179,0,0),18 }, + { IPv4(216,179,0,0),19 }, + { IPv4(216,179,12,0),22 }, + { IPv4(216,179,32,0),19 }, + { IPv4(216,179,64,0),19 }, + { IPv4(216,179,128,0),19 }, + { IPv4(216,179,139,0),24 }, + { IPv4(216,179,160,0),19 }, + { IPv4(216,179,192,0),18 }, + { IPv4(216,180,0,0),17 }, + { IPv4(216,180,112,0),20 }, + { IPv4(216,180,128,0),19 }, + { IPv4(216,181,0,0),16 }, + { IPv4(216,183,32,0),20 }, + { IPv4(216,183,96,0),22 }, + { IPv4(216,183,100,0),23 }, + { IPv4(216,183,102,0),24 }, + { IPv4(216,183,104,0),23 }, + { IPv4(216,183,106,0),24 }, + { IPv4(216,183,107,0),24 }, + { IPv4(216,183,108,0),23 }, + { IPv4(216,183,110,0),23 }, + { IPv4(216,183,114,0),23 }, + { IPv4(216,183,116,0),24 }, + { IPv4(216,183,117,0),24 }, + { IPv4(216,183,118,0),24 }, + { IPv4(216,183,119,0),24 }, + { IPv4(216,183,120,0),22 }, + { IPv4(216,183,124,0),24 }, + { IPv4(216,183,125,0),24 }, + { IPv4(216,183,126,0),23 }, + { IPv4(216,184,64,0),19 }, + { IPv4(216,185,32,0),19 }, + { IPv4(216,185,64,0),20 }, + { IPv4(216,185,64,0),19 }, + { IPv4(216,185,80,0),22 }, + { IPv4(216,185,84,0),22 }, + { IPv4(216,185,88,0),21 }, + { IPv4(216,185,88,0),22 }, + { IPv4(216,185,92,0),22 }, + { IPv4(216,185,96,0),20 }, + { IPv4(216,185,96,0),19 }, + { IPv4(216,185,112,0),20 }, + { 
IPv4(216,185,192,0),20 }, + { IPv4(216,187,64,0),23 }, + { IPv4(216,187,66,0),23 }, + { IPv4(216,187,68,0),23 }, + { IPv4(216,187,70,0),23 }, + { IPv4(216,187,72,0),21 }, + { IPv4(216,187,76,0),23 }, + { IPv4(216,187,80,0),22 }, + { IPv4(216,187,84,0),22 }, + { IPv4(216,187,89,0),24 }, + { IPv4(216,187,90,0),24 }, + { IPv4(216,187,91,0),24 }, + { IPv4(216,187,92,0),22 }, + { IPv4(216,187,96,0),21 }, + { IPv4(216,187,104,0),22 }, + { IPv4(216,187,108,0),22 }, + { IPv4(216,187,112,0),23 }, + { IPv4(216,187,114,0),24 }, + { IPv4(216,187,115,0),24 }, + { IPv4(216,187,116,0),22 }, + { IPv4(216,187,120,0),24 }, + { IPv4(216,187,122,0),23 }, + { IPv4(216,187,124,0),23 }, + { IPv4(216,187,126,0),23 }, + { IPv4(216,188,0,0),17 }, + { IPv4(216,188,36,0),24 }, + { IPv4(216,188,76,0),24 }, + { IPv4(216,188,128,0),18 }, + { IPv4(216,189,22,0),23 }, + { IPv4(216,189,26,0),23 }, + { IPv4(216,189,160,0),24 }, + { IPv4(216,189,160,0),20 }, + { IPv4(216,189,192,0),20 }, + { IPv4(216,190,24,0),21 }, + { IPv4(216,190,80,0),24 }, + { IPv4(216,190,81,0),24 }, + { IPv4(216,190,82,0),24 }, + { IPv4(216,190,83,0),24 }, + { IPv4(216,190,84,0),24 }, + { IPv4(216,190,85,0),24 }, + { IPv4(216,190,86,0),24 }, + { IPv4(216,190,87,0),24 }, + { IPv4(216,190,140,0),22 }, + { IPv4(216,190,152,0),24 }, + { IPv4(216,190,153,0),24 }, + { IPv4(216,190,164,0),23 }, + { IPv4(216,190,200,0),24 }, + { IPv4(216,190,240,0),21 }, + { IPv4(216,191,0,0),16 }, + { IPv4(216,191,76,0),24 }, + { IPv4(216,191,77,0),24 }, + { IPv4(216,194,0,0),19 }, + { IPv4(216,194,192,0),19 }, + { IPv4(216,195,0,0),19 }, + { IPv4(216,196,0,0),24 }, + { IPv4(216,196,0,0),18 }, + { IPv4(216,196,35,0),24 }, + { IPv4(216,196,128,0),21 }, + { IPv4(216,196,128,0),17 }, + { IPv4(216,196,128,0),18 }, + { IPv4(216,196,136,0),21 }, + { IPv4(216,196,144,0),21 }, + { IPv4(216,196,152,0),21 }, + { IPv4(216,196,160,0),21 }, + { IPv4(216,196,168,0),21 }, + { IPv4(216,196,192,0),18 }, + { IPv4(216,196,224,0),22 }, + { IPv4(216,196,228,0),24 }, + { IPv4(216,197,128,0),19 }, + { IPv4(216,198,73,0),24 }, + { IPv4(216,198,96,0),20 }, + { IPv4(216,198,96,0),24 }, + { IPv4(216,198,98,0),24 }, + { IPv4(216,198,107,0),24 }, + { IPv4(216,198,110,0),24 }, + { IPv4(216,198,111,0),24 }, + { IPv4(216,198,112,0),24 }, + { IPv4(216,198,113,0),24 }, + { IPv4(216,198,114,0),24 }, + { IPv4(216,198,115,0),24 }, + { IPv4(216,198,117,0),24 }, + { IPv4(216,198,192,0),19 }, + { IPv4(216,198,224,0),19 }, + { IPv4(216,200,0,0),16 }, + { IPv4(216,200,25,0),24 }, + { IPv4(216,200,68,0),22 }, + { IPv4(216,200,72,0),21 }, + { IPv4(216,200,80,0),22 }, + { IPv4(216,200,160,0),20 }, + { IPv4(216,200,206,0),24 }, + { IPv4(216,200,246,0),24 }, + { IPv4(216,200,247,0),24 }, + { IPv4(216,201,0,0),18 }, + { IPv4(216,201,128,0),18 }, + { IPv4(216,201,192,0),19 }, + { IPv4(216,201,224,0),20 }, + { IPv4(216,202,3,0),24 }, + { IPv4(216,202,4,0),24 }, + { IPv4(216,202,5,0),24 }, + { IPv4(216,202,92,0),24 }, + { IPv4(216,202,93,0),24 }, + { IPv4(216,202,104,0),22 }, + { IPv4(216,203,0,0),18 }, + { IPv4(216,203,128,0),17 }, + { IPv4(216,205,192,0),20 }, + { IPv4(216,206,17,0),24 }, + { IPv4(216,206,18,0),23 }, + { IPv4(216,206,24,0),24 }, + { IPv4(216,206,41,0),24 }, + { IPv4(216,206,52,0),24 }, + { IPv4(216,206,80,0),23 }, + { IPv4(216,206,96,0),22 }, + { IPv4(216,206,100,0),24 }, + { IPv4(216,206,158,0),24 }, + { IPv4(216,206,203,0),24 }, + { IPv4(216,206,210,0),24 }, + { IPv4(216,206,215,0),24 }, + { IPv4(216,207,45,0),24 }, + { IPv4(216,207,72,0),21 }, + { IPv4(216,207,146,0),23 }, + { IPv4(216,207,212,0),23 }, 
+ { IPv4(216,207,214,0),23 }, + { IPv4(216,207,252,0),22 }, + { IPv4(216,208,175,0),24 }, + { IPv4(216,208,176,0),24 }, + { IPv4(216,210,96,0),20 }, + { IPv4(216,210,128,0),17 }, + { IPv4(216,211,0,0),17 }, + { IPv4(216,211,0,0),18 }, + { IPv4(216,211,64,0),20 }, + { IPv4(216,211,80,0),20 }, + { IPv4(216,211,96,0),20 }, + { IPv4(216,211,112,0),20 }, + { IPv4(216,211,224,0),22 }, + { IPv4(216,211,228,0),22 }, + { IPv4(216,211,232,0),22 }, + { IPv4(216,211,236,0),22 }, + { IPv4(216,214,12,0),22 }, + { IPv4(216,216,0,0),15 }, + { IPv4(216,216,7,0),24 }, + { IPv4(216,216,23,0),24 }, + { IPv4(216,216,127,0),24 }, + { IPv4(216,216,164,0),24 }, + { IPv4(216,216,204,0),22 }, + { IPv4(216,216,224,0),22 }, + { IPv4(216,216,232,0),22 }, + { IPv4(216,216,239,0),24 }, + { IPv4(216,216,254,0),23 }, + { IPv4(216,217,8,0),24 }, + { IPv4(216,217,88,0),24 }, + { IPv4(216,217,112,0),20 }, + { IPv4(216,217,129,0),24 }, + { IPv4(216,217,168,0),24 }, + { IPv4(216,217,169,0),24 }, + { IPv4(216,217,170,0),24 }, + { IPv4(216,217,171,0),24 }, + { IPv4(216,217,172,0),24 }, + { IPv4(216,217,173,0),24 }, + { IPv4(216,217,174,0),24 }, + { IPv4(216,217,175,0),24 }, + { IPv4(216,217,185,0),24 }, + { IPv4(216,217,204,0),24 }, + { IPv4(216,217,222,0),24 }, + { IPv4(216,218,64,0),19 }, + { IPv4(216,218,128,0),17 }, + { IPv4(216,218,207,0),24 }, + { IPv4(216,219,128,0),17 }, + { IPv4(216,220,32,0),20 }, + { IPv4(216,220,46,0),24 }, + { IPv4(216,220,64,0),20 }, + { IPv4(216,220,128,0),19 }, + { IPv4(216,220,140,0),23 }, + { IPv4(216,220,142,0),23 }, + { IPv4(216,220,144,0),23 }, + { IPv4(216,220,160,0),20 }, + { IPv4(216,220,176,0),20 }, + { IPv4(216,220,192,0),20 }, + { IPv4(216,220,224,0),19 }, + { IPv4(216,221,32,0),24 }, + { IPv4(216,221,33,0),24 }, + { IPv4(216,221,34,0),24 }, + { IPv4(216,221,35,0),24 }, + { IPv4(216,221,36,0),24 }, + { IPv4(216,221,37,0),24 }, + { IPv4(216,221,38,0),24 }, + { IPv4(216,221,39,0),24 }, + { IPv4(216,221,40,0),24 }, + { IPv4(216,221,41,0),24 }, + { IPv4(216,221,42,0),23 }, + { IPv4(216,221,44,0),24 }, + { IPv4(216,221,45,0),24 }, + { IPv4(216,221,46,0),24 }, + { IPv4(216,221,47,0),24 }, + { IPv4(216,221,48,0),24 }, + { IPv4(216,221,49,0),24 }, + { IPv4(216,221,50,0),24 }, + { IPv4(216,221,51,0),24 }, + { IPv4(216,221,52,0),24 }, + { IPv4(216,221,53,0),24 }, + { IPv4(216,221,54,0),24 }, + { IPv4(216,221,55,0),24 }, + { IPv4(216,221,56,0),24 }, + { IPv4(216,221,57,0),24 }, + { IPv4(216,221,58,0),24 }, + { IPv4(216,221,59,0),24 }, + { IPv4(216,221,60,0),24 }, + { IPv4(216,221,61,0),24 }, + { IPv4(216,221,62,0),24 }, + { IPv4(216,221,63,0),24 }, + { IPv4(216,221,64,0),19 }, + { IPv4(216,221,80,0),20 }, + { IPv4(216,221,224,0),21 }, + { IPv4(216,221,232,0),24 }, + { IPv4(216,221,234,0),24 }, + { IPv4(216,221,237,0),24 }, + { IPv4(216,221,239,0),24 }, + { IPv4(216,221,240,0),24 }, + { IPv4(216,222,34,0),23 }, + { IPv4(216,222,64,0),21 }, + { IPv4(216,222,72,0),22 }, + { IPv4(216,222,76,0),22 }, + { IPv4(216,222,111,0),24 }, + { IPv4(216,222,124,0),22 }, + { IPv4(216,222,128,0),19 }, + { IPv4(216,222,160,0),24 }, + { IPv4(216,222,160,0),20 }, + { IPv4(216,222,224,0),19 }, + { IPv4(216,223,0,0),19 }, + { IPv4(216,223,3,0),24 }, + { IPv4(216,223,8,0),21 }, + { IPv4(216,223,10,0),24 }, + { IPv4(216,223,11,0),24 }, + { IPv4(216,223,16,0),23 }, + { IPv4(216,223,18,0),24 }, + { IPv4(216,223,32,0),24 }, + { IPv4(216,223,32,0),20 }, + { IPv4(216,223,32,0),19 }, + { IPv4(216,223,33,0),24 }, + { IPv4(216,223,34,0),24 }, + { IPv4(216,223,35,0),24 }, + { IPv4(216,223,40,0),22 }, + { IPv4(216,223,44,0),24 }, 
+ { IPv4(216,223,46,0),24 }, + { IPv4(216,223,64,0),18 }, + { IPv4(216,223,72,0),24 }, + { IPv4(216,223,80,0),24 }, + { IPv4(216,223,81,0),24 }, + { IPv4(216,223,82,0),24 }, + { IPv4(216,223,83,0),24 }, + { IPv4(216,223,86,0),24 }, + { IPv4(216,223,94,0),24 }, + { IPv4(216,223,95,0),24 }, + { IPv4(216,223,100,0),24 }, + { IPv4(216,223,101,0),24 }, + { IPv4(216,223,102,0),24 }, + { IPv4(216,223,103,0),24 }, + { IPv4(216,223,192,0),19 }, + { IPv4(216,223,224,0),20 }, + { IPv4(216,223,232,0),21 }, + { IPv4(216,223,233,0),24 }, + { IPv4(216,224,64,0),19 }, + { IPv4(216,224,224,0),20 }, + { IPv4(216,226,64,0),19 }, + { IPv4(216,226,128,0),19 }, + { IPv4(216,226,192,0),21 }, + { IPv4(216,226,199,0),24 }, + { IPv4(216,226,200,0),21 }, + { IPv4(216,226,208,0),22 }, + { IPv4(216,226,208,0),24 }, + { IPv4(216,226,210,0),24 }, + { IPv4(216,226,212,0),23 }, + { IPv4(216,226,213,0),24 }, + { IPv4(216,226,214,0),23 }, + { IPv4(216,226,220,0),23 }, + { IPv4(216,226,224,0),20 }, + { IPv4(216,226,238,0),23 }, + { IPv4(216,226,240,0),21 }, + { IPv4(216,226,248,0),21 }, + { IPv4(216,226,248,0),23 }, + { IPv4(216,226,252,0),22 }, + { IPv4(216,226,252,0),24 }, + { IPv4(216,226,253,0),24 }, + { IPv4(216,228,0,0),20 }, + { IPv4(216,228,3,0),24 }, + { IPv4(216,228,4,0),24 }, + { IPv4(216,228,5,0),24 }, + { IPv4(216,228,6,0),24 }, + { IPv4(216,228,7,0),24 }, + { IPv4(216,228,8,0),24 }, + { IPv4(216,228,10,0),24 }, + { IPv4(216,228,14,0),24 }, + { IPv4(216,228,16,0),20 }, + { IPv4(216,228,160,0),19 }, + { IPv4(216,228,192,0),20 }, + { IPv4(216,228,194,0),24 }, + { IPv4(216,228,195,0),24 }, + { IPv4(216,228,196,0),24 }, + { IPv4(216,228,197,0),24 }, + { IPv4(216,228,200,0),24 }, + { IPv4(216,228,201,0),24 }, + { IPv4(216,228,202,0),24 }, + { IPv4(216,228,203,0),24 }, + { IPv4(216,229,96,0),20 }, + { IPv4(216,229,224,0),20 }, + { IPv4(216,229,240,0),20 }, + { IPv4(216,230,128,0),20 }, + { IPv4(216,230,128,0),21 }, + { IPv4(216,230,128,0),24 }, + { IPv4(216,230,129,0),24 }, + { IPv4(216,230,130,0),24 }, + { IPv4(216,230,131,0),24 }, + { IPv4(216,230,132,0),24 }, + { IPv4(216,230,133,0),24 }, + { IPv4(216,230,134,0),24 }, + { IPv4(216,230,135,0),24 }, + { IPv4(216,230,136,0),24 }, + { IPv4(216,230,137,0),24 }, + { IPv4(216,230,138,0),24 }, + { IPv4(216,230,139,0),24 }, + { IPv4(216,230,140,0),24 }, + { IPv4(216,230,141,0),24 }, + { IPv4(216,230,142,0),24 }, + { IPv4(216,230,143,0),24 }, + { IPv4(216,230,144,0),24 }, + { IPv4(216,230,145,0),24 }, + { IPv4(216,230,146,0),24 }, + { IPv4(216,230,147,0),24 }, + { IPv4(216,230,148,0),24 }, + { IPv4(216,230,149,0),24 }, + { IPv4(216,230,150,0),24 }, + { IPv4(216,230,151,0),24 }, + { IPv4(216,230,152,0),24 }, + { IPv4(216,230,153,0),24 }, + { IPv4(216,230,154,0),24 }, + { IPv4(216,230,155,0),24 }, + { IPv4(216,230,156,0),24 }, + { IPv4(216,230,157,0),24 }, + { IPv4(216,230,158,0),24 }, + { IPv4(216,230,159,0),24 }, + { IPv4(216,230,160,0),20 }, + { IPv4(216,231,0,0),20 }, + { IPv4(216,231,4,0),22 }, + { IPv4(216,231,16,0),22 }, + { IPv4(216,231,16,0),20 }, + { IPv4(216,231,20,0),23 }, + { IPv4(216,231,32,0),24 }, + { IPv4(216,231,32,0),19 }, + { IPv4(216,231,96,0),19 }, + { IPv4(216,231,192,0),22 }, + { IPv4(216,231,192,0),23 }, + { IPv4(216,231,194,0),23 }, + { IPv4(216,231,201,0),24 }, + { IPv4(216,231,205,0),24 }, + { IPv4(216,231,207,0),24 }, + { IPv4(216,231,208,0),20 }, + { IPv4(216,231,224,0),20 }, + { IPv4(216,231,240,0),20 }, + { IPv4(216,234,224,0),20 }, + { IPv4(216,234,224,0),19 }, + { IPv4(216,234,240,0),20 }, + { IPv4(216,235,32,0),20 }, + { IPv4(216,235,32,0),19 
}, + { IPv4(216,235,64,0),20 }, + { IPv4(216,235,96,0),19 }, + { IPv4(216,235,128,0),19 }, + { IPv4(216,235,160,0),20 }, + { IPv4(216,235,192,0),24 }, + { IPv4(216,235,194,0),24 }, + { IPv4(216,235,208,0),20 }, + { IPv4(216,235,240,0),20 }, + { IPv4(216,235,247,0),24 }, + { IPv4(216,236,160,0),20 }, + { IPv4(216,236,192,0),20 }, + { IPv4(216,236,202,0),24 }, + { IPv4(216,236,206,0),24 }, + { IPv4(216,236,208,0),21 }, + { IPv4(216,236,208,0),22 }, + { IPv4(216,236,220,0),22 }, + { IPv4(216,237,64,0),22 }, + { IPv4(216,237,68,0),22 }, + { IPv4(216,237,72,0),22 }, + { IPv4(216,237,76,0),22 }, + { IPv4(216,237,96,0),20 }, + { IPv4(216,237,128,0),18 }, + { IPv4(216,237,163,0),24 }, + { IPv4(216,239,32,0),23 }, + { IPv4(216,239,34,0),23 }, + { IPv4(216,239,34,0),24 }, + { IPv4(216,239,35,0),24 }, + { IPv4(216,239,36,0),23 }, + { IPv4(216,239,36,0),24 }, + { IPv4(216,239,37,0),24 }, + { IPv4(216,239,38,0),23 }, + { IPv4(216,239,40,0),24 }, + { IPv4(216,239,41,0),24 }, + { IPv4(216,239,46,0),24 }, + { IPv4(216,239,96,0),23 }, + { IPv4(216,239,99,0),24 }, + { IPv4(216,239,100,0),23 }, + { IPv4(216,239,102,0),23 }, + { IPv4(216,239,104,0),24 }, + { IPv4(216,239,105,0),24 }, + { IPv4(216,239,224,0),20 }, + { IPv4(216,239,240,0),20 }, + { IPv4(216,240,208,0),20 }, + { IPv4(216,241,0,0),19 }, + { IPv4(216,241,32,0),20 }, + { IPv4(216,241,96,0),20 }, + { IPv4(216,241,128,0),22 }, + { IPv4(216,241,132,0),23 }, + { IPv4(216,241,136,0),22 }, + { IPv4(216,241,140,0),23 }, + { IPv4(216,241,142,0),23 }, + { IPv4(216,241,144,0),22 }, + { IPv4(216,241,208,0),20 }, + { IPv4(216,242,24,0),24 }, + { IPv4(216,242,27,0),24 }, + { IPv4(216,242,38,0),24 }, + { IPv4(216,242,84,0),24 }, + { IPv4(216,243,0,0),24 }, + { IPv4(216,243,1,0),30 }, + { IPv4(216,243,8,0),24 }, + { IPv4(216,243,10,0),25 }, + { IPv4(216,243,11,0),24 }, + { IPv4(216,243,13,0),24 }, + { IPv4(216,243,15,0),24 }, + { IPv4(216,243,18,0),24 }, + { IPv4(216,243,19,0),24 }, + { IPv4(216,243,20,0),24 }, + { IPv4(216,243,21,0),24 }, + { IPv4(216,243,24,0),24 }, + { IPv4(216,243,25,0),24 }, + { IPv4(216,243,27,0),24 }, + { IPv4(216,243,28,0),24 }, + { IPv4(216,243,45,0),24 }, + { IPv4(216,243,46,0),24 }, + { IPv4(216,243,47,0),24 }, + { IPv4(216,243,48,0),24 }, + { IPv4(216,243,52,128),26 }, + { IPv4(216,243,53,128),26 }, + { IPv4(216,243,56,0),24 }, + { IPv4(216,243,59,0),24 }, + { IPv4(216,243,128,0),18 }, + { IPv4(216,243,192,0),19 }, + { IPv4(216,243,224,0),20 }, + { IPv4(216,244,0,0),18 }, + { IPv4(216,244,96,0),20 }, + { IPv4(216,244,110,0),24 }, + { IPv4(216,244,111,0),24 }, + { IPv4(216,244,128,0),19 }, + { IPv4(216,244,160,0),20 }, + { IPv4(216,244,176,0),21 }, + { IPv4(216,244,184,0),22 }, + { IPv4(216,244,188,0),23 }, + { IPv4(216,244,190,0),24 }, + { IPv4(216,244,191,0),24 }, + { IPv4(216,245,0,0),21 }, + { IPv4(216,245,12,0),22 }, + { IPv4(216,245,16,0),22 }, + { IPv4(216,245,22,0),24 }, + { IPv4(216,245,24,0),22 }, + { IPv4(216,245,28,0),22 }, + { IPv4(216,246,0,0),17 }, + { IPv4(216,247,0,0),16 }, + { IPv4(216,248,64,0),18 }, + { IPv4(216,248,193,0),24 }, + { IPv4(216,248,194,0),24 }, + { IPv4(216,248,195,0),24 }, + { IPv4(216,248,196,0),22 }, + { IPv4(216,248,200,0),22 }, + { IPv4(216,248,204,0),24 }, + { IPv4(216,248,205,0),24 }, + { IPv4(216,248,224,0),20 }, + { IPv4(216,249,64,0),19 }, + { IPv4(216,249,96,0),20 }, + { IPv4(216,249,136,0),24 }, + { IPv4(216,249,137,0),24 }, + { IPv4(216,249,138,0),24 }, + { IPv4(216,249,139,0),24 }, + { IPv4(216,249,140,0),24 }, + { IPv4(216,249,141,0),24 }, + { IPv4(216,250,64,0),19 }, + { 
IPv4(216,250,128,0),21 }, + { IPv4(216,250,128,0),20 }, + { IPv4(216,250,129,0),24 }, + { IPv4(216,250,136,0),21 }, + { IPv4(216,250,136,0),24 }, + { IPv4(216,250,139,0),24 }, + { IPv4(216,250,140,0),24 }, + { IPv4(216,250,141,0),24 }, + { IPv4(216,250,142,0),23 }, + { IPv4(216,250,224,0),19 }, + { IPv4(216,251,50,0),24 }, + { IPv4(216,251,128,0),20 }, + { IPv4(216,251,128,0),19 }, + { IPv4(216,252,0,0),18 }, + { IPv4(216,252,128,0),20 }, + { IPv4(216,252,140,0),22 }, + { IPv4(216,252,144,0),21 }, + { IPv4(216,252,152,0),21 }, + { IPv4(216,252,160,0),20 }, + { IPv4(216,252,174,0),24 }, + { IPv4(216,252,176,0),24 }, + { IPv4(216,252,176,0),22 }, + { IPv4(216,252,177,0),24 }, + { IPv4(216,252,179,0),24 }, + { IPv4(216,252,182,0),24 }, + { IPv4(216,252,182,0),23 }, + { IPv4(216,252,183,0),24 }, + { IPv4(216,252,184,0),22 }, + { IPv4(216,252,187,0),24 }, + { IPv4(216,252,188,0),22 }, + { IPv4(216,252,192,0),20 }, + { IPv4(216,252,197,0),24 }, + { IPv4(216,252,208,0),20 }, + { IPv4(216,252,220,0),23 }, + { IPv4(216,252,222,0),23 }, + { IPv4(216,252,224,0),21 }, + { IPv4(216,252,226,0),24 }, + { IPv4(216,252,227,0),24 }, + { IPv4(216,252,228,0),23 }, + { IPv4(216,252,232,0),21 }, + { IPv4(216,252,234,0),24 }, + { IPv4(216,252,235,0),24 }, + { IPv4(216,252,240,0),20 }, + { IPv4(216,253,0,0),16 }, + { IPv4(216,253,7,0),24 }, + { IPv4(216,253,8,0),24 }, + { IPv4(216,253,8,0),22 }, + { IPv4(216,253,9,0),24 }, + { IPv4(216,253,35,0),24 }, + { IPv4(216,253,80,0),24 }, + { IPv4(216,253,167,0),24 }, + { IPv4(216,254,0,0),18 }, + { IPv4(216,254,0,0),24 }, + { IPv4(216,254,64,0),18 }, + { IPv4(216,254,128,0),18 }, + { IPv4(216,255,0,0),20 }, + { IPv4(217,8,0,0),19 }, + { IPv4(217,8,96,0),20 }, + { IPv4(217,9,64,0),20 }, + { IPv4(217,10,64,0),20 }, + { IPv4(217,10,96,0),20 }, + { IPv4(217,10,192,0),24 }, + { IPv4(217,10,193,0),24 }, + { IPv4(217,10,195,0),24 }, + { IPv4(217,10,196,0),24 }, + { IPv4(217,10,197,0),24 }, + { IPv4(217,10,198,0),24 }, + { IPv4(217,10,199,0),24 }, + { IPv4(217,10,200,0),24 }, + { IPv4(217,10,201,0),24 }, + { IPv4(217,10,203,0),24 }, + { IPv4(217,10,204,0),24 }, + { IPv4(217,10,205,0),24 }, + { IPv4(217,10,206,0),24 }, + { IPv4(217,10,207,0),24 }, + { IPv4(217,10,208,0),24 }, + { IPv4(217,10,210,0),24 }, + { IPv4(217,10,211,0),24 }, + { IPv4(217,10,212,0),24 }, + { IPv4(217,10,213,0),24 }, + { IPv4(217,10,214,0),24 }, + { IPv4(217,10,215,0),24 }, + { IPv4(217,10,216,0),24 }, + { IPv4(217,10,217,0),24 }, + { IPv4(217,10,218,0),24 }, + { IPv4(217,10,219,0),24 }, + { IPv4(217,10,220,0),24 }, + { IPv4(217,10,221,0),24 }, + { IPv4(217,10,222,0),24 }, + { IPv4(217,10,234,0),24 }, + { IPv4(217,12,32,0),20 }, + { IPv4(217,14,0,0),20 }, + { IPv4(217,14,160,0),21 }, + { IPv4(217,14,160,0),20 }, + { IPv4(217,14,165,0),24 }, + { IPv4(217,14,166,0),24 }, + { IPv4(217,15,0,0),20 }, + { IPv4(217,15,32,0),20 }, + { IPv4(217,15,64,0),20 }, + { IPv4(217,15,160,0),21 }, + { IPv4(217,15,168,0),21 }, + { IPv4(217,17,192,0),20 }, + { IPv4(217,18,32,0),20 }, + { IPv4(217,18,192,0),20 }, + { IPv4(217,19,3,0),24 }, + { IPv4(217,19,4,0),24 }, + { IPv4(217,19,5,0),24 }, + { IPv4(217,19,6,0),24 }, + { IPv4(217,19,9,0),24 }, + { IPv4(217,19,10,0),24 }, + { IPv4(217,19,32,0),20 }, + { IPv4(217,19,224,0),20 }, + { IPv4(217,20,128,0),20 }, + { IPv4(217,21,0,0),24 }, + { IPv4(217,21,1,0),24 }, + { IPv4(217,21,2,0),24 }, + { IPv4(217,21,3,0),24 }, + { IPv4(217,21,4,0),24 }, + { IPv4(217,21,8,0),24 }, + { IPv4(217,21,51,0),24 }, + { IPv4(217,21,128,0),20 }, + { IPv4(217,22,0,0),20 }, + { IPv4(217,23,224,0),20 }, + 
{ IPv4(217,24,128,0),20 }, + { IPv4(217,24,224,0),20 }, + { IPv4(217,25,64,0),20 }, + { IPv4(217,26,33,0),24 }, + { IPv4(217,26,160,0),24 }, + { IPv4(217,27,0,0),23 }, + { IPv4(217,27,2,0),23 }, + { IPv4(217,27,32,0),24 }, + { IPv4(217,27,33,0),24 }, + { IPv4(217,27,34,0),24 }, + { IPv4(217,27,35,0),24 }, + { IPv4(217,27,36,0),24 }, + { IPv4(217,27,37,0),24 }, + { IPv4(217,28,192,0),20 }, + { IPv4(217,29,32,0),20 }, + { IPv4(217,29,96,0),20 }, + { IPv4(217,29,192,0),23 }, + { IPv4(217,29,194,0),23 }, + { IPv4(217,31,64,0),20 }, + { IPv4(217,32,0,0),12 }, + { IPv4(217,64,96,0),20 }, + { IPv4(217,66,32,0),20 }, + { IPv4(217,66,128,0),20 }, + { IPv4(217,66,160,0),20 }, + { IPv4(217,67,64,0),20 }, + { IPv4(217,67,224,0),20 }, + { IPv4(217,68,32,0),20 }, + { IPv4(217,68,224,0),23 }, + { IPv4(217,69,0,0),20 }, + { IPv4(217,69,64,0),20 }, + { IPv4(217,71,0,0),22 }, + { IPv4(217,71,10,0),24 }, + { IPv4(217,75,64,0),20 }, + { IPv4(217,76,160,0),20 }, + { IPv4(217,76,192,0),20 }, + { IPv4(217,77,128,0),19 }, + { IPv4(217,114,160,0),20 }, + { IPv4(217,114,192,0),24 }, + { IPv4(217,115,192,0),20 }, + { IPv4(217,115,193,0),24 }, + { IPv4(217,115,197,0),24 }, + { IPv4(217,115,224,0),20 }, + { IPv4(217,116,0,0),20 }, + { IPv4(217,116,160,0),20 }, + { IPv4(217,117,0,0),20 }, + { IPv4(217,117,32,0),19 }, + { IPv4(217,117,96,0),20 }, + { IPv4(217,118,128,0),20 }, + { IPv4(217,119,96,0),19 }, + { IPv4(217,119,192,0),20 }, + { IPv4(217,120,0,0),14 }, + { IPv4(217,131,0,0),16 }, + { IPv4(217,131,0,0),17 }, + { IPv4(217,131,128,0),17 }, + { IPv4(217,137,250,0),24 }, + { IPv4(217,138,0,0),16 }, + { IPv4(217,140,0,0),20 }, + { IPv4(217,140,16,0),20 }, + { IPv4(217,145,64,0),20 }, + { IPv4(217,145,72,0),21 }, + { IPv4(217,146,96,0),20 }, + { IPv4(217,148,40,0),21 }, + { IPv4(217,148,160,0),20 }, + { IPv4(217,148,160,0),24 }, + { IPv4(217,148,161,0),24 }, + { IPv4(217,148,192,0),20 }, + { IPv4(217,149,64,0),20 }, + { IPv4(217,150,128,0),20 }, + { IPv4(217,151,0,0),20 }, + { IPv4(217,151,208,0),20 }, + { IPv4(217,154,0,0),16 }, + { IPv4(217,156,8,0),24 }, + { IPv4(217,156,18,0),24 }, + { IPv4(217,156,36,0),24 }, + { IPv4(217,156,42,0),24 }, + { IPv4(217,156,56,0),24 }, + { IPv4(217,156,75,0),24 }, + { IPv4(217,162,0,0),16 }, + { IPv4(217,166,0,0),16 }, + { IPv4(217,169,0,0),19 }, + { IPv4(217,169,160,0),20 }, + { IPv4(217,169,224,0),20 }, + { IPv4(217,170,32,0),20 }, + { IPv4(217,170,192,0),20 }, + { IPv4(217,171,224,0),20 }, + { IPv4(217,173,64,0),20 }, + { IPv4(217,174,32,0),24 }, + { IPv4(217,175,96,0),20 }, + { IPv4(217,176,0,0),13 }, + { IPv4(217,194,32,0),20 }, + { IPv4(217,194,160,0),20 }, + { IPv4(217,194,192,0),20 }, + { IPv4(217,195,192,0),24 }, + { IPv4(217,195,193,0),24 }, + { IPv4(217,195,194,0),24 }, + { IPv4(217,195,195,0),24 }, + { IPv4(217,195,224,0),20 }, + { IPv4(217,196,224,0),20 }, + { IPv4(217,216,0,0),15 }, + { IPv4(217,220,0,0),16 }, + { IPv4(218,0,0,0),16 }, + { IPv4(218,1,0,0),16 }, + { IPv4(218,2,0,0),15 }, + { IPv4(218,4,0,0),16 }, + { IPv4(218,5,0,0),16 }, + { IPv4(218,6,0,0),17 }, + { IPv4(218,6,128,0),17 }, + { IPv4(218,7,0,0),16 }, + { IPv4(218,8,0,0),16 }, + { IPv4(218,9,0,0),16 }, + { IPv4(218,10,0,0),16 }, + { IPv4(218,11,0,0),16 }, + { IPv4(218,12,0,0),16 }, + { IPv4(218,13,0,0),16 }, + { IPv4(218,14,0,0),15 }, + { IPv4(218,16,0,0),14 }, + { IPv4(218,20,0,0),16 }, + { IPv4(218,21,0,0),19 }, + { IPv4(218,21,32,0),20 }, + { IPv4(218,21,64,0),18 }, + { IPv4(218,21,128,0),17 }, + { IPv4(218,22,0,0),15 }, + { IPv4(218,24,0,0),16 }, + { IPv4(218,25,0,0),16 }, + { IPv4(218,26,0,0),16 }, 
+ { IPv4(218,27,0,0),16 }, + { IPv4(218,28,0,0),15 }, + { IPv4(218,30,0,0),20 }, + { IPv4(218,30,16,0),22 }, + { IPv4(218,30,224,0),19 }, + { IPv4(218,31,0,0),16 }, + { IPv4(218,40,112,0),20 }, + { IPv4(218,40,128,0),20 }, + { IPv4(218,48,0,0),15 }, + { IPv4(218,49,226,0),23 }, + { IPv4(218,49,228,0),22 }, + { IPv4(218,49,232,0),21 }, + { IPv4(218,49,240,0),20 }, + { IPv4(218,56,0,0),15 }, + { IPv4(218,58,0,0),15 }, + { IPv4(218,60,0,0),16 }, + { IPv4(218,63,0,0),16 }, + { IPv4(218,64,0,0),16 }, + { IPv4(218,65,0,0),17 }, + { IPv4(218,65,128,0),17 }, + { IPv4(218,66,0,0),16 }, + { IPv4(218,67,0,0),17 }, + { IPv4(218,67,128,0),17 }, + { IPv4(218,68,0,0),15 }, + { IPv4(218,95,224,0),19 }, + { IPv4(218,144,0,0),13 }, + { IPv4(218,184,0,0),16 }, + { IPv4(218,184,0,0),18 }, + { IPv4(218,184,64,0),18 }, + { IPv4(218,184,128,0),18 }, + { IPv4(218,184,192,0),18 } +}; + +#define NUM_ROUTE_ENTRIES (sizeof(mae_west_tbl) / sizeof(mae_west_tbl[0])) + +#endif /* _TEST_LPM_ROUTES_H_ */ diff --git a/app/test/test_malloc.c b/app/test/test_malloc.c new file mode 100644 index 0000000000..a38a6deed4 --- /dev/null +++ b/app/test/test_malloc.c @@ -0,0 +1,776 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" + +#define N 10000 + +#define QUOTE_(x) #x +#define QUOTE(x) QUOTE_(x) +#define MALLOC_MEMZONE_SIZE QUOTE(RTE_MALLOC_MEMZONE_SIZE) + +/* + * Malloc + * ====== + * + * Allocate some dynamic memory from heap (3 areas). Check that areas + * don't overlap an that alignment constraints match. This test is + * done many times on different lcores simultaneously. + */ + +/* Test if memory overlaps: return 1 if true, or 0 if false. 
*/ +static int +is_memory_overlap(void *p1, size_t len1, void *p2, size_t len2) +{ + unsigned long ptr1 = (unsigned long)p1; + unsigned long ptr2 = (unsigned long)p2; + + if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1) + return 1; + else if (ptr2 < ptr1 && (ptr1 - ptr2) < len2) + return 1; + return 0; +} + +static int +is_aligned(void *p, int align) +{ + unsigned long addr = (unsigned long)p; + unsigned mask = align - 1; + + if (addr & mask) + return 0; + return 1; +} + +static int +test_align_overlap_per_lcore(__attribute__((unused)) void *arg) +{ + const unsigned align1 = 8, + align2 = 64, + align3 = 2048; + unsigned i,j; + void *p1 = NULL, *p2 = NULL, *p3 = NULL; + int ret = 0; + + for (i = 0; i < N; i++) { + p1 = rte_zmalloc("dummy", 1000, align1); + if (!p1){ + printf("rte_zmalloc returned NULL (i=%u)\n", i); + ret = -1; + break; + } + for(j = 0; j < 1000 ; j++) { + if( *(char *)p1 != 0) { + printf("rte_zmalloc didn't zero " + "the allocated memory\n"); + ret = -1; + } + } + p2 = rte_malloc("dummy", 1000, align2); + if (!p2){ + printf("rte_malloc returned NULL (i=%u)\n", i); + ret = -1; + rte_free(p1); + break; + } + p3 = rte_malloc("dummy", 1000, align3); + if (!p3){ + printf("rte_malloc returned NULL (i=%u)\n", i); + ret = -1; + rte_free(p1); + rte_free(p2); + break; + } + if (is_memory_overlap(p1, 1000, p2, 1000)) { + printf("p1 and p2 overlap\n"); + ret = -1; + } + if (is_memory_overlap(p2, 1000, p3, 1000)) { + printf("p2 and p3 overlap\n"); + ret = -1; + } + if (is_memory_overlap(p1, 1000, p3, 1000)) { + printf("p1 and p3 overlap\n"); + ret = -1; + } + if (!is_aligned(p1, align1)) { + printf("p1 is not aligned\n"); + ret = -1; + } + if (!is_aligned(p2, align2)) { + printf("p2 is not aligned\n"); + ret = -1; + } + if (!is_aligned(p3, align3)) { + printf("p3 is not aligned\n"); + ret = -1; + } + rte_free(p1); + rte_free(p2); + rte_free(p3); + } + rte_malloc_dump_stats("dummy"); + + return ret; +} + +static int +test_reordered_free_per_lcore(__attribute__((unused)) void *arg) +{ + const unsigned align1 = 8, + align2 = 64, + align3 = 2048; + unsigned i,j; + void *p1, *p2, *p3; + int ret = 0; + + for (i = 0; i < 30; i++) { + p1 = rte_zmalloc("dummy", 1000, align1); + if (!p1){ + printf("rte_zmalloc returned NULL (i=%u)\n", i); + ret = -1; + break; + } + for(j = 0; j < 1000 ; j++) { + if( *(char *)p1 != 0) { + printf("rte_zmalloc didn't zero " + "the allocated memory\n"); + ret = -1; + } + } + /* use calloc to allocate 1000 16-byte items this time */ + p2 = rte_calloc("dummy", 1000, 16, align2); + /* for third request use regular malloc again */ + p3 = rte_malloc("dummy", 1000, align3); + if (!p2 || !p3){ + printf("rte_malloc returned NULL (i=%u)\n", i); + ret = -1; + break; + } + if (is_memory_overlap(p1, 1000, p2, 1000)) { + printf("p1 and p2 overlap\n"); + ret = -1; + } + if (is_memory_overlap(p2, 1000, p3, 1000)) { + printf("p2 and p3 overlap\n"); + ret = -1; + } + if (is_memory_overlap(p1, 1000, p3, 1000)) { + printf("p1 and p3 overlap\n"); + ret = -1; + } + if (!is_aligned(p1, align1)) { + printf("p1 is not aligned\n"); + ret = -1; + } + if (!is_aligned(p2, align2)) { + printf("p2 is not aligned\n"); + ret = -1; + } + if (!is_aligned(p3, align3)) { + printf("p3 is not aligned\n"); + ret = -1; + } + /* try freeing in every possible order */ + switch (i%6){ + case 0: + rte_free(p1); + rte_free(p2); + rte_free(p3); + break; + case 1: + rte_free(p1); + rte_free(p3); + rte_free(p2); + break; + case 2: + rte_free(p2); + rte_free(p1); + rte_free(p3); + break; + case 3: + rte_free(p2);
+ rte_free(p3); + rte_free(p1); + break; + case 4: + rte_free(p3); + rte_free(p1); + rte_free(p2); + break; + case 5: + rte_free(p3); + rte_free(p2); + rte_free(p1); + break; + } + } + rte_malloc_dump_stats("dummy"); + + return ret; +} + + +/* test function inside the malloc lib*/ +static int +test_str_to_size(void) +{ + struct { + const char *str; + uint64_t value; + } test_values[] = + {{ "5G", (uint64_t)5 * 1024 * 1024 *1024 }, + {"0x20g", (uint64_t)0x20 * 1024 * 1024 *1024}, + {"10M", 10 * 1024 * 1024}, + {"050m", 050 * 1024 * 1024}, + {"8K", 8 * 1024}, + {"15k", 15 * 1024}, + {"0200", 0200}, + {"0x103", 0x103}, + {"432", 432}, + {"-1", 0}, /* negative values return 0 */ + {" -2", 0}, + {" -3MB", 0}, + {"18446744073709551616", 0} /* ULLONG_MAX + 1 == out of range*/ + }; + unsigned i; + for (i = 0; i < sizeof(test_values)/sizeof(test_values[0]); i++) + if (rte_str_to_size(test_values[i].str) != test_values[i].value) + return -1; + return 0; +} + +static int +test_big_alloc(void) +{ + void *p1 = rte_malloc("BIG", rte_str_to_size(MALLOC_MEMZONE_SIZE) * 2, 1024); + if (!p1) + return -1; + rte_free(p1); + return 0; +} + +static int +test_memzone_size_alloc(void) +{ + void *p1 = rte_malloc("BIG", rte_str_to_size(MALLOC_MEMZONE_SIZE) - 128, 64); + if (!p1) + return -1; + rte_free(p1); + /* one extra check - check no crashes if free(NULL) */ + rte_free(NULL); + return 0; +} + +static int +test_rte_malloc_type_limits(void) +{ + /* The type-limits functionality is not yet implemented, + * so always return 0 no matter what the retval. + */ + const char *typename = "limit_test"; + rte_malloc_set_limit(typename, 64 * 1024); + rte_malloc_dump_stats(typename); + return 0; +} + +static int +test_realloc(void) +{ + const char hello_str[] = "Hello, world!"; + const unsigned size1 = 1024; + const unsigned size2 = size1 + 1024; + const unsigned size3 = size2; + const unsigned size4 = size3 + 1024; + + /* test data is the same even if element is moved*/ + char *ptr1 = rte_zmalloc(NULL, size1, CACHE_LINE_SIZE); + if (!ptr1){ + printf("NULL pointer returned from rte_zmalloc\n"); + return -1; + } + rte_snprintf(ptr1, size1, "%s" ,hello_str); + char *ptr2 = rte_realloc(ptr1, size2, CACHE_LINE_SIZE); + if (!ptr2){ + rte_free(ptr1); + printf("NULL pointer returned from rte_realloc\n"); + return -1; + } + if (ptr1 == ptr2){ + printf("unexpected - ptr1 == ptr2\n"); + } + if (strcmp(ptr2, hello_str) != 0){ + printf("Error - lost data from pointed area\n"); + rte_free(ptr2); + return -1; + } + unsigned i; + for (i = strnlen(hello_str, sizeof(hello_str)); i < size1; i++) + if (ptr2[i] != 0){ + printf("Bad data in realloc\n"); + rte_free(ptr2); + return -1; + } + /* now allocate third element, free the second + * and resize third. It should not move. 
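/*
 * [Editorial sketch - not part of the original patch.]
 * One plausible implementation of the size-string semantics that the
 * test_str_to_size() table above exercises: base-0 parsing (decimal, octal,
 * hex), an optional K/M/G suffix in either case, and 0 returned for negative
 * or out-of-range input. The real rte_str_to_size() may differ in detail.
 */
#include <stdlib.h>
#include <stdint.h>
#include <ctype.h>
#include <errno.h>

static uint64_t parse_size(const char *str)
{
	char *end = NULL;

	while (isspace((unsigned char)*str))
		str++;
	if (*str == '-')		/* negative values are rejected */
		return 0;
	errno = 0;
	uint64_t v = strtoull(str, &end, 0);	/* base 0 handles 0x... and 0... */
	if (errno == ERANGE)		/* e.g. ULLONG_MAX + 1 */
		return 0;
	switch (end ? tolower((unsigned char)*end) : '\0') {
	case 'g': v <<= 10; /* fall through */
	case 'm': v <<= 10; /* fall through */
	case 'k': v <<= 10; break;
	default: break;
	}
	return v;
}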
(ptr1 is now invalid) + */ + char *ptr3 = rte_zmalloc(NULL, size3, CACHE_LINE_SIZE); + if (!ptr3){ + printf("NULL pointer returned from rte_zmalloc\n"); + rte_free(ptr2); + return -1; + } + for (i = 0; i < size3; i++) + if (ptr3[i] != 0){ + printf("Bad data in zmalloc\n"); + rte_free(ptr3); + rte_free(ptr2); + return -1; + } + rte_free(ptr2); + /* first resize to half the size of the freed block */ + char *ptr4 = rte_realloc(ptr3, size4, CACHE_LINE_SIZE); + if (!ptr4){ + printf("NULL pointer returned from rte_realloc\n"); + rte_free(ptr3); + return -1; + } + if (ptr3 != ptr4){ + printf("Unexpected - ptr4 != ptr3\n"); + rte_free(ptr4); + return -1; + } + /* now resize again to the full size of the freed block */ + ptr4 = rte_realloc(ptr3, size3 + size2 + size1, CACHE_LINE_SIZE); + if (ptr3 != ptr4){ + printf("Unexpected - ptr4 != ptr3 on second resize\n"); + rte_free(ptr4); + return -1; + } + rte_free(ptr4); + + /* now try a resize to a smaller size, see if it works */ + const unsigned size5 = 1024; + const unsigned size6 = size5 / 2; + char *ptr5 = rte_malloc(NULL, size5, CACHE_LINE_SIZE); + if (!ptr5){ + printf("NULL pointer returned from rte_malloc\n"); + return -1; + } + char *ptr6 = rte_realloc(ptr5, size6, CACHE_LINE_SIZE); + if (!ptr6){ + printf("NULL pointer returned from rte_realloc\n"); + rte_free(ptr5); + return -1; + } + if (ptr5 != ptr6){ + printf("Error, resizing to a smaller size moved data\n"); + rte_free(ptr6); + return -1; + } + rte_free(ptr6); + + /* check for behaviour changing alignment */ + const unsigned size7 = 1024; + const unsigned orig_align = CACHE_LINE_SIZE; + unsigned new_align = CACHE_LINE_SIZE * 2; + char *ptr7 = rte_malloc(NULL, size7, orig_align); + if (!ptr7){ + printf("NULL pointer returned from rte_malloc\n"); + return -1; + } + /* calc an alignment we don't already have */ + while(RTE_ALIGN(ptr7, new_align) == ptr7) + new_align *= 2; + char *ptr8 = rte_realloc(ptr7, size7, new_align); + if (!ptr8){ + printf("NULL pointer returned from rte_realloc\n"); + rte_free(ptr7); + return -1; + } + if (RTE_ALIGN(ptr8, new_align) != ptr8){ + printf("Failure to re-align data\n"); + rte_free(ptr8); + return -1; + } + rte_free(ptr8); + + /* test behaviour when there is a free block after current one, + * but its not big enough + */ + unsigned size9 = 1024, size10 = 1024; + unsigned size11 = size9 + size10 + 256; + char *ptr9 = rte_malloc(NULL, size9, CACHE_LINE_SIZE); + if (!ptr9){ + printf("NULL pointer returned from rte_malloc\n"); + return -1; + } + char *ptr10 = rte_malloc(NULL, size10, CACHE_LINE_SIZE); + if (!ptr10){ + printf("NULL pointer returned from rte_malloc\n"); + return -1; + } + rte_free(ptr9); + char *ptr11 = rte_realloc(ptr10, size11, CACHE_LINE_SIZE); + if (!ptr11){ + printf("NULL pointer returned from rte_realloc\n"); + rte_free(ptr10); + return -1; + } + if (ptr11 == ptr10){ + printf("Error, unexpected that realloc has not created new buffer\n"); + rte_free(ptr11); + return -1; + } + rte_free(ptr11); + + /* check we don't crash if we pass null to realloc + * We should get a malloc of the size requested*/ + const size_t size12 = 1024; + size_t size12_check; + char *ptr12 = rte_realloc(NULL, size12, CACHE_LINE_SIZE); + if (!ptr12){ + printf("NULL pointer returned from rte_realloc\n"); + return -1; + } + if (rte_malloc_validate(ptr12, &size12_check) < 0 || + size12_check != size12){ + rte_free(ptr12); + return -1; + } + rte_free(ptr12); + return 0; +} + +static int +test_random_alloc_free(void *_ __attribute__((unused))) +{ + struct mem_list { + 
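/*
 * [Editorial sketch - not part of the original patch.]
 * The data-preservation property test_realloc() verifies mirrors the standard
 * realloc() contract: contents survive up to the smaller of the old and new
 * sizes, and the block may or may not move. Only the in-place-growth-into-a-
 * freed-neighbour and re-alignment checks are rte_realloc() specific.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *p = malloc(16);
	if (p == NULL)
		return 1;
	strcpy(p, "Hello, world!");
	char *q = realloc(p, 4096);	/* may return p itself or a new block */
	if (q == NULL) {
		free(p);
		return 1;
	}
	printf("moved=%s data=\"%s\"\n", (q == p) ? "no" : "yes", q);
	free(q);
	return 0;
}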
struct mem_list *next; + char data[0]; + } *list_head = NULL; + unsigned i; + unsigned count = 0; + + rte_srand((unsigned)rte_rdtsc()); + + for (i = 0; i < N; i++){ + unsigned free_mem = 0; + size_t allocated_size; + while (!free_mem){ + const unsigned mem_size = sizeof(struct mem_list) + \ + rte_rand() % (64 * 1024); + const unsigned align = 1 << (rte_rand() % 12); /* up to 4k alignment */ + struct mem_list *entry = rte_malloc(NULL, + mem_size, align); + if (entry == NULL) + return -1; + if (RTE_ALIGN(entry, align)!= entry) + return -1; + if (rte_malloc_validate(entry, &allocated_size) == -1 + || allocated_size < mem_size) + return -1; + memset(entry->data, rte_lcore_id(), + mem_size - sizeof(*entry)); + entry->next = list_head; + if (rte_malloc_validate(entry, NULL) == -1) + return -1; + list_head = entry; + + count++; + /* switch to freeing the memory with a 20% probability */ + free_mem = ((rte_rand() % 10) >= 8); + } + while (list_head){ + struct mem_list *entry = list_head; + list_head = list_head->next; + rte_free(entry); + } + } + printf("Lcore %u allocated/freed %u blocks\n", rte_lcore_id(), count); + return 0; +} + +#define err_return() do { \ + printf("%s: %d - Error\n", __func__, __LINE__); \ + goto err_return; \ +} while (0) + +static int +test_rte_malloc_validate(void) +{ + const size_t request_size = 1024; + size_t allocated_size; + char *data_ptr = rte_malloc(NULL, request_size, CACHE_LINE_SIZE); + if (data_ptr == NULL) { + printf("%s: %d - Allocation error\n", __func__, __LINE__); + return -1; + } + + /* check that a null input returns -1 */ + if (rte_malloc_validate(NULL, NULL) != -1) + err_return(); + + /* check that we get ok on a valid pointer */ + if (rte_malloc_validate(data_ptr, &allocated_size) < 0) + err_return(); + + /* check that the returned size is ok */ + if (allocated_size < request_size) + err_return(); + +#ifdef RTE_LIBRTE_MALLOC_DEBUG + int retval; + char *over_write_vals = NULL; + + /****** change the header to be bad */ + char save_buf[64]; + over_write_vals = (char *)((uintptr_t)data_ptr - sizeof(save_buf)); + /* first save the data as a backup before overwriting it */ + memcpy(save_buf, over_write_vals, sizeof(save_buf)); + memset(over_write_vals, 1, sizeof(save_buf)); + /* then run validate */ + retval = rte_malloc_validate(data_ptr, NULL); + /* finally restore the data again */ + memcpy(over_write_vals, save_buf, sizeof(save_buf)); + /* check we previously had an error */ + if (retval != -1) + err_return(); + + /* check all ok again */ + if (rte_malloc_validate(data_ptr, &allocated_size) < 0) + err_return(); + + /**** change the trailer to be bad */ + over_write_vals = (char *)((uintptr_t)data_ptr + allocated_size); + /* first save the data as a backup before overwriting it */ + memcpy(save_buf, over_write_vals, sizeof(save_buf)); + memset(over_write_vals, 1, sizeof(save_buf)); + /* then run validate */ + retval = rte_malloc_validate(data_ptr, NULL); + /* finally restore the data again */ + memcpy(over_write_vals, save_buf, sizeof(save_buf)); + if (retval != -1) + err_return(); + + /* check all ok again */ + if (rte_malloc_validate(data_ptr, &allocated_size) < 0) + err_return(); +#endif + + rte_free(data_ptr); + return 0; + +err_return: + /*clean up */ + rte_free(data_ptr); + return -1; +} + +static int +test_zero_aligned_alloc(void) +{ + char *p1 = rte_malloc(NULL,1024, 0); + if (!p1) + goto err_return; + if (!rte_is_aligned(p1, CACHE_LINE_SIZE)) + goto err_return; + rte_free(p1); + return 0; + +err_return: + /*clean up */ + if (p1) rte_free(p1); + 
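/*
 * [Editorial sketch - not part of the original patch.]
 * The RTE_LIBRTE_MALLOC_DEBUG branch above corrupts the bytes just before and
 * just after an allocation and expects rte_malloc_validate() to notice. Below
 * is a generic cookie/guard pattern that captures the idea; the real
 * rte_malloc header/trailer layout is internal and not reproduced here.
 */
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define GUARD 0xDEADBEEFCAFEF00DULL

struct guarded { uint64_t head; size_t len; /* payload follows, then a trailing guard */ };

static void *galloc(size_t len)
{
	struct guarded *g = malloc(sizeof(*g) + len + sizeof(uint64_t));
	if (g == NULL)
		return NULL;
	g->head = GUARD;
	g->len = len;
	memcpy((char *)(g + 1) + len, &(uint64_t){GUARD}, sizeof(uint64_t));
	return g + 1;
}

static int gvalidate(const void *p)	/* 0 = ok, -1 = corrupted */
{
	const struct guarded *g = (const struct guarded *)p - 1;
	uint64_t tail;

	memcpy(&tail, (const char *)p + g->len, sizeof(tail));
	return (g->head == GUARD && tail == GUARD) ? 0 : -1;
}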
return -1; +} + +static int +test_malloc_bad_params(void) +{ + const char *type = NULL; + size_t size = 0; + unsigned align = CACHE_LINE_SIZE; + + /* rte_malloc expected to return null with inappropriate size */ + char *bad_ptr = rte_malloc(type, size, align); + if (bad_ptr != NULL) + goto err_return; + + /* rte_malloc expected to return null with inappropriate alignment */ + align = 17; + size = 1024; + + bad_ptr = rte_malloc(type, size, align); + if (bad_ptr != NULL) + goto err_return; + + return 0; + +err_return: + /* clean up pointer */ + if (bad_ptr) + rte_free(bad_ptr); + return -1; +} + +int +test_malloc(void) +{ + unsigned lcore_id; + int ret = 0; + + if (test_str_to_size() < 0){ + printf("test_str_to_size() failed\n"); + return -1; + } + else printf("test_str_to_size() passed\n"); + + if (test_memzone_size_alloc() < 0){ + printf("test_memzone_size_alloc() failed\n"); + return -1; + } + else printf("test_memzone_size_alloc() passed\n"); + + if (test_big_alloc() < 0){ + printf("test_big_alloc() failed\n"); + return -1; + } + else printf("test_big_alloc() passed\n"); + + if (test_zero_aligned_alloc() < 0){ + printf("test_zero_aligned_alloc() failed\n"); + return -1; + } + else printf("test_zero_aligned_alloc() passed\n"); + + if (test_malloc_bad_params() < 0){ + printf("test_malloc_bad_params() failed\n"); + return -1; + } + else printf("test_malloc_bad_params() passed\n"); + + if (test_realloc() < 0){ + printf("test_realloc() failed\n"); + return -1; + } + else printf("test_realloc() passed\n"); +/*----------------------------*/ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_remote_launch(test_align_overlap_per_lcore, NULL, lcore_id); + } + + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + ret = -1; + } + if (ret < 0){ + printf("test_align_overlap_per_lcore() failed\n"); + return ret; + } + else printf("test_align_overlap_per_lcore() passed\n"); + /*----------------------------*/ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_remote_launch(test_reordered_free_per_lcore, NULL, lcore_id); + } + + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + ret = -1; + } + if (ret < 0){ + printf("test_reordered_free_per_lcore() failed\n"); + return ret; + } + else printf("test_reordered_free_per_lcore() passed\n"); + + /*----------------------------*/ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_remote_launch(test_random_alloc_free, NULL, lcore_id); + } + + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + ret = -1; + } + if (ret < 0){ + printf("test_random_alloc_free() failed\n"); + return ret; + } + else printf("test_random_alloc_free() passed\n"); + + /*----------------------------*/ + ret = test_rte_malloc_type_limits(); + if (ret < 0){ + printf("test_rte_malloc_type_limits() failed\n"); + return ret; + } + /* TODO: uncomment following line once type limits are valid */ + /*else printf("test_rte_malloc_type_limits() passed\n");*/ + + /*----------------------------*/ + ret = test_rte_malloc_validate(); + if (ret < 0){ + printf("test_rte_malloc_validate() failed\n"); + return ret; + } + else printf("test_rte_malloc_validate() passed\n"); + + return 0; +} diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c new file mode 100644 index 0000000000..d09f87f95f --- /dev/null +++ b/app/test/test_mbuf.c @@ -0,0 +1,875 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
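/*
 * [Editorial sketch - not part of the original patch.]
 * The launch/collect pattern the test_malloc() driver above repeats for each
 * per-lcore test, shown in isolation: start a worker on every slave lcore,
 * then reap the return values. The EAL calls are the same ones the test uses
 * (declared in the usual EAL headers, assumed to be rte_lcore.h/rte_launch.h);
 * the worker body is a placeholder.
 */
static int worker(__attribute__((unused)) void *arg)
{
	/* per-lcore work goes here; return a negative value on failure */
	return 0;
}

static int run_on_all_slaves(void)
{
	unsigned lcore_id;
	int ret = 0;

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(worker, NULL, lcore_id);

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	return ret;
}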
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "test.h" + +#define MBUF_SIZE 2048 +#define NB_MBUF 128 +#define MBUF_TEST_DATA_LEN 1464 +#define MBUF_TEST_DATA_LEN2 50 +#define MBUF_TEST_HDR1_LEN 20 +#define MBUF_TEST_HDR2_LEN 30 +#define MBUF_TEST_ALL_HDRS_LEN (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN) + +#define REFCNT_MAX_ITER 64 +#define REFCNT_MAX_TIMEOUT 10 +#define REFCNT_MAX_REF (RTE_MAX_LCORE) +#define REFCNT_MBUF_NUM 64 +#define REFCNT_MBUF_SIZE (sizeof (struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define REFCNT_RING_SIZE (REFCNT_MBUF_NUM * REFCNT_MAX_REF) + +#define MAKE_STRING(x) # x + +static struct rte_mempool *pktmbuf_pool = NULL; +static struct rte_mempool *ctrlmbuf_pool = NULL; + +#if defined RTE_MBUF_SCATTER_GATHER && defined RTE_MBUF_REFCNT_ATOMIC + +static struct rte_mempool *refcnt_pool = NULL; +static struct rte_ring *refcnt_mbuf_ring = NULL; +static volatile uint32_t refcnt_stop_slaves; +static uint32_t refcnt_lcore[RTE_MAX_LCORE]; + +#endif + +/* + * MBUF + * ==== + * + * #. Allocate a mbuf pool. + * + * - The pool contains NB_MBUF elements, where each mbuf is MBUF_SIZE + * bytes long. + * + * #. Test multiple allocations of mbufs from this pool. + * + * - Allocate NB_MBUF and store pointers in a table. + * - If an allocation fails, return an error. + * - Free all these mbufs. + * - Repeat the same test to check that mbufs were freed correctly. + * + * #. Test data manipulation in pktmbuf. + * + * - Alloc an mbuf. + * - Append data using rte_pktmbuf_append(). + * - Test for error in rte_pktmbuf_append() when len is too large. + * - Trim data at the end of mbuf using rte_pktmbuf_trim(). + * - Test for error in rte_pktmbuf_trim() when len is too large. 
+ * - Prepend a header using rte_pktmbuf_prepend(). + * - Test for error in rte_pktmbuf_prepend() when len is too large. + * - Remove data at the beginning of mbuf using rte_pktmbuf_adj(). + * - Test for error in rte_pktmbuf_adj() when len is too large. + * - Check that appended data is not corrupt. + * - Free the mbuf. + * - Between all these tests, check data_len and pkt_len, and + * that the mbuf is contiguous. + * - Repeat the test to check that allocation operations + * reinitialize the mbuf correctly. + * + */ + +#define GOTO_FAIL(str, ...) do { \ + printf("mbuf test FAILED (l.%d): <" str ">\n", \ + __LINE__, ##__VA_ARGS__); \ + goto fail; \ +} while(0) + +/* + * test data manipulation in mbuf with non-ascii data + */ +static int +test_pktmbuf_with_non_ascii_data(void) +{ + struct rte_mbuf *m = NULL; + char *data; + + m = rte_pktmbuf_alloc(pktmbuf_pool); + if (m == NULL) + GOTO_FAIL("Cannot allocate mbuf"); + if (rte_pktmbuf_pkt_len(m) != 0) + GOTO_FAIL("Bad length"); + + data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN); + if (data == NULL) + GOTO_FAIL("Cannot append data"); + if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN) + GOTO_FAIL("Bad pkt length"); + if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN) + GOTO_FAIL("Bad data length"); + memset(data, 0xff, rte_pktmbuf_pkt_len(m)); + if (!rte_pktmbuf_is_contiguous(m)) + GOTO_FAIL("Buffer should be continuous"); + rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN); + + rte_pktmbuf_free(m); + + return 0; + +fail: + if(m) { + rte_pktmbuf_free(m); + } + return -1; +} + +/* + * test data manipulation in mbuf + */ +static int +test_one_pktmbuf(void) +{ + struct rte_mbuf *m = NULL; + char *data, *data2, *hdr; + unsigned i; + + printf("Test pktmbuf API\n"); + + /* alloc a mbuf */ + + m = rte_pktmbuf_alloc(pktmbuf_pool); + if (m == NULL) + GOTO_FAIL("Cannot allocate mbuf"); + if (rte_pktmbuf_pkt_len(m) != 0) + GOTO_FAIL("Bad length"); + + rte_pktmbuf_dump(m, 0); + + /* append data */ + + data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN); + if (data == NULL) + GOTO_FAIL("Cannot append data"); + if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN) + GOTO_FAIL("Bad pkt length"); + if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN) + GOTO_FAIL("Bad data length"); + memset(data, 0x66, rte_pktmbuf_pkt_len(m)); + if (!rte_pktmbuf_is_contiguous(m)) + GOTO_FAIL("Buffer should be continuous"); + rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN); + rte_pktmbuf_dump(m, 2*MBUF_TEST_DATA_LEN); + + /* this append should fail */ + + data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1)); + if (data2 != NULL) + GOTO_FAIL("Append should not succeed"); + + /* append some more data */ + + data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2); + if (data2 == NULL) + GOTO_FAIL("Cannot append data"); + if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2) + GOTO_FAIL("Bad pkt length"); + if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2) + GOTO_FAIL("Bad data length"); + if (!rte_pktmbuf_is_contiguous(m)) + GOTO_FAIL("Buffer should be continuous"); + + /* trim data at the end of mbuf */ + + if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0) + GOTO_FAIL("Cannot trim data"); + if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN) + GOTO_FAIL("Bad pkt length"); + if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN) + GOTO_FAIL("Bad data length"); + if (!rte_pktmbuf_is_contiguous(m)) + GOTO_FAIL("Buffer should be continuous"); + + /* this trim should fail */ + + if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0) + 
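/*
 * [Editorial sketch - not part of the original patch.]
 * Condensed view of the length bookkeeping that test_one_pktmbuf() verifies:
 * append/trim act on the tail, prepend/adj act on the head, and for a
 * single-segment mbuf pkt_len always equals data_len. Error handling is
 * omitted for brevity; pktmbuf_pool is the pool created by this test file.
 */
static void pktmbuf_len_demo(void)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(pktmbuf_pool);

	rte_pktmbuf_append(m, 100);	/* pkt_len = data_len = 100 */
	rte_pktmbuf_prepend(m, 20);	/* 120; headroom shrinks by 20 */
	rte_pktmbuf_adj(m, 20);		/* strip the header again: back to 100 */
	rte_pktmbuf_trim(m, 50);	/* drop 50 bytes of tail: 50 left */

	rte_pktmbuf_free(m);
}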
GOTO_FAIL("trim should not succeed"); + + /* prepend one header */ + + hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN); + if (hdr == NULL) + GOTO_FAIL("Cannot prepend"); + if (data - hdr != MBUF_TEST_HDR1_LEN) + GOTO_FAIL("Prepend failed"); + if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN) + GOTO_FAIL("Bad pkt length"); + if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN) + GOTO_FAIL("Bad data length"); + if (!rte_pktmbuf_is_contiguous(m)) + GOTO_FAIL("Buffer should be continuous"); + memset(hdr, 0x55, MBUF_TEST_HDR1_LEN); + + /* prepend another header */ + + hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN); + if (hdr == NULL) + GOTO_FAIL("Cannot prepend"); + if (data - hdr != MBUF_TEST_ALL_HDRS_LEN) + GOTO_FAIL("Prepend failed"); + if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN) + GOTO_FAIL("Bad pkt length"); + if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN) + GOTO_FAIL("Bad data length"); + if (!rte_pktmbuf_is_contiguous(m)) + GOTO_FAIL("Buffer should be continuous"); + memset(hdr, 0x55, MBUF_TEST_HDR2_LEN); + + rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 0); + rte_pktmbuf_dump(m, 0); + + /* this prepend should fail */ + + hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1)); + if (hdr != NULL) + GOTO_FAIL("prepend should not succeed"); + + /* remove data at beginning of mbuf (adj) */ + + if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN)) + GOTO_FAIL("rte_pktmbuf_adj failed"); + if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN) + GOTO_FAIL("Bad pkt length"); + if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN) + GOTO_FAIL("Bad data length"); + if (!rte_pktmbuf_is_contiguous(m)) + GOTO_FAIL("Buffer should be continuous"); + + /* this adj should fail */ + + if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL) + GOTO_FAIL("rte_pktmbuf_adj should not succeed"); + + /* check data */ + + if (!rte_pktmbuf_is_contiguous(m)) + GOTO_FAIL("Buffer should be continuous"); + + for (i=0; ipkt.next = rte_pktmbuf_alloc(pktmbuf_pool); + if(mc->pkt.next == NULL) + GOTO_FAIL("Next Pkt Null\n"); + + clone = rte_pktmbuf_clone(mc, pktmbuf_pool); + if (clone == NULL) + GOTO_FAIL("cannot clone data\n"); + + /* free mbuf */ + rte_pktmbuf_free(mc); + rte_pktmbuf_free(clone); + mc = NULL; + clone = NULL; + return 0; + +fail: + if (mc) + rte_pktmbuf_free(mc); + return -1; +#endif /* RTE_MBUF_SCATTER_GATHER */ +} +#undef GOTO_FAIL + + + +/* + * test allocation and free of mbufs + */ +static int +test_pktmbuf_pool(void) +{ + unsigned i; + struct rte_mbuf *m[NB_MBUF]; + int ret = 0; + + for (i=0; ipkt.next; + rte_pktmbuf_free_seg(mt); + } + } + } + + return ret; +} + +/* + * Stress test for rte_mbuf atomic refcnt. + * Implies that: + * RTE_MBUF_SCATTER_GATHER and RTE_MBUF_REFCNT_ATOMIC are both defined. + * For more efficency, recomended to run with RTE_LIBRTE_MBUF_DEBUG defined. 
+ */ + +#if defined RTE_MBUF_SCATTER_GATHER && defined RTE_MBUF_REFCNT_ATOMIC + +static int +test_refcnt_slave(__attribute__((unused)) void *arg) +{ + uint32_t lcore, free; + void *mp; + + lcore = rte_lcore_id(); + printf("%s started at lcore %u\n", __func__, lcore); + + free = 0; + while (refcnt_stop_slaves == 0) { + if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) { + free++; + rte_pktmbuf_free((struct rte_mbuf *)mp); + } + } + + refcnt_lcore[lcore] += free; + printf("%s finished at lcore %u, " + "number of freed mbufs: %u\n", + __func__, lcore, free); + return (0); +} + +static void +test_refcnt_iter(uint32_t lcore, uint32_t iter) +{ + uint16_t ref; + uint32_t i, n, tref, wn; + struct rte_mbuf *m; + + tref = 0; + + /* For each mbuf in the pool: + * - allocate mbuf, + * - increment it's reference up to N+1, + * - enqueue it N times into the ring for slave cores to free. + */ + for (i = 0, n = rte_mempool_count(refcnt_pool); + i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL; + i++) { + ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL); + tref += ref; + if ((ref & 1) != 0) { + rte_pktmbuf_refcnt_update(m, ref); + while (ref-- != 0) + rte_ring_enqueue(refcnt_mbuf_ring, m); + } else { + while (ref-- != 0) { + rte_pktmbuf_refcnt_update(m, 1); + rte_ring_enqueue(refcnt_mbuf_ring, m); + } + } + rte_pktmbuf_free(m); + } + + if (i != n) + rte_panic("(lcore=%u, iter=%u): was able to allocate only " + "%u from %u mbufs\n", lcore, iter, i, n); + + /* wait till slave lcores will consume all mbufs */ + while (!rte_ring_empty(refcnt_mbuf_ring)) + ; + + /* check that all mbufs are back into mempool by now */ + for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) { + if ((i = rte_mempool_count(refcnt_pool)) == n) { + refcnt_lcore[lcore] += tref; + printf("%s(lcore=%u, iter=%u) completed, " + "%u references processed\n", + __func__, lcore, iter, tref); + return; + } + rte_delay_ms(1000); + } + + rte_panic("(lcore=%u, iter=%u): after %us only " + "%u of %u mbufs left free\n", lcore, iter, wn, i, n); +} + +static int +test_refcnt_master(void) +{ + uint32_t i, lcore; + + lcore = rte_lcore_id(); + printf("%s started at lcore %u\n", __func__, lcore); + + for (i = 0; i != REFCNT_MAX_ITER; i++) + test_refcnt_iter(lcore, i); + + refcnt_stop_slaves = 1; + rte_wmb(); + + printf("%s finished at lcore %u\n", __func__, lcore); + return (0); +} + +#endif + +static int +test_refcnt_mbuf(void) +{ +#if defined RTE_MBUF_SCATTER_GATHER && defined RTE_MBUF_REFCNT_ATOMIC + + uint32_t lnum, master, slave, tref; + + + if ((lnum = rte_lcore_count()) == 1) { + printf("skipping %s, number of lcores: %u is not enough\n", + __func__, lnum); + return (0); + } + + printf("starting %s, at %u lcores\n", __func__, lnum); + + /* create refcnt pool & ring if they don't exist */ + + if (refcnt_pool == NULL && + (refcnt_pool = rte_mempool_create( + MAKE_STRING(refcnt_pool), + REFCNT_MBUF_NUM, REFCNT_MBUF_SIZE, 0, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, + SOCKET_ID_ANY, 0)) == NULL) { + printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n", + __func__); + return (-1); + } + + if (refcnt_mbuf_ring == NULL && + (refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring", + REFCNT_RING_SIZE, SOCKET_ID_ANY, + RING_F_SP_ENQ)) == NULL) { + printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring) + "\n", __func__); + return (-1); + } + + refcnt_stop_slaves = 0; + memset(refcnt_lcore, 0, sizeof (refcnt_lcore)); + + rte_eal_mp_remote_launch(test_refcnt_slave, NULL, SKIP_MASTER); + + 
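/*
 * [Editorial sketch - not part of the original patch.]
 * The master/slave handoff used above, in miniature: the producer enqueues
 * mbuf pointers on an rte_ring and a stop flag tells the consumers when to
 * drain and exit. Same ring calls as the test; the names here are
 * illustrative only.
 */
static volatile uint32_t stop;

static int consumer(void *ring_arg)
{
	struct rte_ring *r = ring_arg;
	void *obj;

	while (stop == 0 || !rte_ring_empty(r))
		if (rte_ring_dequeue(r, &obj) == 0)
			rte_pktmbuf_free((struct rte_mbuf *)obj);
	return 0;
}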
test_refcnt_master(); + + rte_eal_mp_wait_lcore(); + + /* check that we porcessed all references */ + tref = 0; + master = rte_get_master_lcore(); + + RTE_LCORE_FOREACH_SLAVE(slave) + tref += refcnt_lcore[slave]; + + if (tref != refcnt_lcore[master]) + rte_panic("refernced mbufs: %u, freed mbufs: %u\n", + tref, refcnt_lcore[master]); + + rte_mempool_dump(refcnt_pool); + rte_ring_dump(refcnt_mbuf_ring); + +#endif + return (0); +} + +#ifdef RTE_EXEC_ENV_BAREMETAL + +/* baremetal - don't test failing sanity checks */ +static int +test_failing_mbuf_sanity_check(void) +{ + return 0; +} + +#else + +#include +#include + +/* linuxapp - use fork() to test mbuf errors panic */ +static int +verify_mbuf_check_panics(struct rte_mbuf *buf) +{ + int pid; + int status; + + pid = fork(); + + if (pid == 0) { + rte_mbuf_sanity_check(buf, RTE_MBUF_PKT, 1); /* should panic */ + exit(0); /* return normally if it doesn't panic */ + } else if (pid < 0){ + printf("Fork Failed\n"); + return -1; + } + wait(&status); + if(status == 0) + return -1; + + return 0; +} + +static int +test_failing_mbuf_sanity_check(void) +{ + struct rte_mbuf *buf; + struct rte_mbuf badbuf; + + printf("Checking rte_mbuf_sanity_check for failure conditions\n"); + + /* get a good mbuf to use to make copies */ + buf = rte_pktmbuf_alloc(pktmbuf_pool); + if (buf == NULL) + return -1; + printf("Checking good mbuf initially\n"); + if (verify_mbuf_check_panics(buf) != -1) + return -1; + + printf("Now checking for error conditions\n"); + + if (verify_mbuf_check_panics(NULL)) { + printf("Error with NULL mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.type = (uint8_t)-1; + if (verify_mbuf_check_panics(&badbuf)) { + printf("Error with bad-type mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.pool = NULL; + if (verify_mbuf_check_panics(&badbuf)) { + printf("Error with bad-pool mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.buf_physaddr = 0; + if (verify_mbuf_check_panics(&badbuf)) { + printf("Error with bad-physaddr mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.buf_addr = NULL; + if (verify_mbuf_check_panics(&badbuf)) { + printf("Error with bad-addr mbuf test\n"); + return -1; + } + +#ifdef RTE_MBUF_SCATTER_GATHER + badbuf = *buf; + badbuf.refcnt = 0; + if (verify_mbuf_check_panics(&badbuf)) { + printf("Error with bad-refcnt(0) mbuf test\n"); + return -1; + } + + badbuf = *buf; + badbuf.refcnt = UINT16_MAX; + if (verify_mbuf_check_panics(&badbuf)) { + printf("Error with bad-refcnt(MAX) mbuf test\n"); + return -1; + } +#endif + + return 0; +} +#endif + + +int +test_mbuf(void) +{ + RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != 64); + + /* create pktmbuf pool if it does not exist */ + if (pktmbuf_pool == NULL) { + pktmbuf_pool = + rte_mempool_create("test_pktmbuf_pool", NB_MBUF, + MBUF_SIZE, 32, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + SOCKET_ID_ANY, 0); + } + + if (pktmbuf_pool == NULL) { + printf("cannot allocate mbuf pool\n"); + return -1; + } + + /* test multiple mbuf alloc */ + if (test_pktmbuf_pool() < 0) { + printf("test_mbuf_pool() failed\n"); + return -1; + } + + /* do it another time to check that all mbufs were freed */ + if (test_pktmbuf_pool() < 0) { + printf("test_mbuf_pool() failed (2)\n"); + return -1; + } + + /* test data manipulation in mbuf */ + if (test_one_pktmbuf() < 0) { + printf("test_one_mbuf() failed\n"); + return -1; + } + + + /* + * do it another time, to check that allocation reinitialize + * the mbuf correctly + */ + if 
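/*
 * [Editorial sketch - not part of the original patch.]
 * The fork() trick used by verify_mbuf_check_panics() above, in generic form:
 * run a function that is expected to abort/panic in a child process and treat
 * a clean exit as test failure. Plain POSIX; maybe_panics() is a placeholder
 * for the call under test.
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdlib.h>

static int expect_abnormal_exit(void (*maybe_panics)(void))
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {			/* child: should die inside maybe_panics() */
		maybe_panics();
		exit(0);		/* reaching here means it did NOT panic */
	}
	if (pid < 0)
		return -1;		/* fork failed */

	waitpid(pid, &status, 0);
	/* success only if the child did not exit normally with status 0 */
	return (WIFEXITED(status) && WEXITSTATUS(status) == 0) ? -1 : 0;
}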
(test_one_pktmbuf() < 0) { + printf("test_one_mbuf() failed (2)\n"); + return -1; + } + + if (test_pktmbuf_with_non_ascii_data() < 0) { + printf("test_pktmbuf_with_non_ascii_data() failed\n"); + return -1; + } + + /* create ctrlmbuf pool if it does not exist */ + if (ctrlmbuf_pool == NULL) { + ctrlmbuf_pool = + rte_mempool_create("test_ctrlmbuf_pool", NB_MBUF, + sizeof(struct rte_mbuf), 32, 0, + NULL, NULL, + rte_ctrlmbuf_init, NULL, + SOCKET_ID_ANY, 0); + } + + /* test control mbuf */ + if (test_one_ctrlmbuf() < 0) { + printf("test_one_ctrlmbuf() failed\n"); + return -1; + } + + /* test free pktmbuf segment one by one */ + if (test_pktmbuf_free_segment() < 0) { + printf("test_pktmbuf_free_segment() failed.\n"); + return -1; + } + + if (testclone_testupdate_testdetach()<0){ + printf("testclone_and_testupdate() failed \n"); + return -1; + } + + if (test_refcnt_mbuf()<0){ + printf("test_refcnt_mbuf() failed \n"); + return -1; + } + + if (test_failing_mbuf_sanity_check() < 0) { + printf("test_failing_mbuf_sanity_check() failed\n"); + return -1; + } + return 0; +} diff --git a/app/test/test_memcpy.c b/app/test/test_memcpy.c new file mode 100644 index 0000000000..3ae3f9905d --- /dev/null +++ b/app/test/test_memcpy.c @@ -0,0 +1,429 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include "test.h" + +/* + * Set this to the maximum buffer size you want to test. If it is 0, then the + * values in the buf_sizes[] array below will be used. 
+ */ +#define TEST_VALUE_RANGE 0 + +/* List of buffer sizes to test */ +#if TEST_VALUE_RANGE == 0 +static size_t buf_sizes[] = { + 0, 1, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 255, + 256, 257, 320, 384, 511, 512, 513, 1023, 1024, 1025, 1518, 1522, 1600, + 2048, 3072, 4096, 5120, 6144, 7168, 8192 +}; +/* MUST be as large as largest packet size above */ +#define SMALL_BUFFER_SIZE 8192 +#else /* TEST_VALUE_RANGE != 0 */ +static size_t buf_sizes[TEST_VALUE_RANGE]; +#define SMALL_BUFFER_SIZE TEST_VALUE_RANGE +#endif /* TEST_VALUE_RANGE == 0 */ + + +/* + * Arrays of this size are used for measuring uncached memory accesses by + * picking a random location within the buffer. Make this smaller if there are + * memory allocation errors. + */ +#define LARGE_BUFFER_SIZE (100 * 1024 * 1024) + +/* How many times to run timing loop for performance tests */ +#define TEST_ITERATIONS 1000000 +#define TEST_BATCH_SIZE 100 + +/* Data is aligned on this many bytes (power of 2) */ +#define ALIGNMENT_UNIT 16 + +/* + * Pointers used in performance tests. The two large buffers are for uncached + * access where random addresses within the buffer are used for each + * memcpy. The two small buffers are for cached access. + */ +static uint8_t *large_buf_read, *large_buf_write, + *small_buf_read, *small_buf_write; + +/* Initialise data buffers. */ +static int +init_buffers(void) +{ + unsigned i; + + large_buf_read = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT); + if (large_buf_read == NULL) + goto error_large_buf_read; + + large_buf_write = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT); + if (large_buf_write == NULL) + goto error_large_buf_write; + + small_buf_read = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT); + if (small_buf_read == NULL) + goto error_small_buf_read; + + small_buf_write = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT); + if (small_buf_write == NULL) + goto error_small_buf_write; + + for (i = 0; i < LARGE_BUFFER_SIZE; i++) + large_buf_read[i] = rte_rand(); + for (i = 0; i < SMALL_BUFFER_SIZE; i++) + small_buf_read[i] = rte_rand(); + + return 0; + +error_small_buf_write: + rte_free(small_buf_read); +error_small_buf_read: + rte_free(large_buf_write); +error_large_buf_write: + rte_free(large_buf_read); +error_large_buf_read: + printf("ERROR: not enough memory"); + return -1; +} + +/* Cleanup data buffers */ +static void +free_buffers(void) +{ + rte_free(large_buf_read); + rte_free(large_buf_write); + rte_free(small_buf_read); + rte_free(small_buf_write); +} + +/* + * Get a random offset into large array, with enough space needed to perform + * max copy size. Offset is aligned. + */ +static inline size_t +get_rand_offset(void) +{ + return ((rte_rand() % (LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE)) & + ~(ALIGNMENT_UNIT - 1)); +} + +/* Fill in source and destination addresses. */ +static inline void +fill_addr_arrays(size_t *dst_addr, int is_dst_cached, + size_t *src_addr, int is_src_cached) +{ + unsigned int i; + + for (i = 0; i < TEST_BATCH_SIZE; i++) { + dst_addr[i] = (is_dst_cached) ? 0 : get_rand_offset(); + src_addr[i] = (is_src_cached) ? 0 : get_rand_offset(); + } +} + +/* Integer division with round to nearest */ +static inline uint64_t +div_round(uint64_t dividend, uint64_t divisor) +{ + return ((2 * dividend) + divisor) / (2 * divisor); +} + +/* + * WORKAROUND: For some reason the first test doing an uncached write + * takes a very long time (~25 times longer than is expected). So we do + * it once without timing. 
+ */ +static void +do_uncached_write(uint8_t *dst, int is_dst_cached, + const uint8_t *src, int is_src_cached, size_t size) +{ + unsigned i, j; + size_t dst_addrs[TEST_BATCH_SIZE], src_addrs[TEST_BATCH_SIZE]; + + for (i = 0; i < (TEST_ITERATIONS / TEST_BATCH_SIZE); i++) { + fill_addr_arrays(dst_addrs, is_dst_cached, + src_addrs, is_src_cached); + for (j = 0; j < TEST_BATCH_SIZE; j++) + rte_memcpy(dst+dst_addrs[j], src+src_addrs[j], size); + } +} + +/* + * Run a single memcpy performance test. This is a macro to ensure that if + * the "size" parameter is a constant it won't be converted to a variable. + */ +#define SINGLE_PERF_TEST(dst, is_dst_cached, src, is_src_cached, size) do { \ + unsigned int iter, t; \ + size_t dst_addrs[TEST_BATCH_SIZE], src_addrs[TEST_BATCH_SIZE]; \ + uint64_t start_time, total_time = 0; \ + uint64_t total_time2 = 0; \ + for (iter = 0; iter < (TEST_ITERATIONS / TEST_BATCH_SIZE); iter++) { \ + fill_addr_arrays(dst_addrs, is_dst_cached, \ + src_addrs, is_src_cached); \ + start_time = rte_rdtsc(); \ + for (t = 0; t < TEST_BATCH_SIZE; t++) \ + rte_memcpy(dst+dst_addrs[t], src+src_addrs[t], size); \ + total_time += rte_rdtsc() - start_time; \ + } \ + for (iter = 0; iter < (TEST_ITERATIONS / TEST_BATCH_SIZE); iter++) { \ + fill_addr_arrays(dst_addrs, is_dst_cached, \ + src_addrs, is_src_cached); \ + start_time = rte_rdtsc(); \ + for (t = 0; t < TEST_BATCH_SIZE; t++) \ + memcpy(dst+dst_addrs[t], src+src_addrs[t], size); \ + total_time2 += rte_rdtsc() - start_time; \ + } \ + printf("%9u/", (unsigned)div_round(total_time, TEST_ITERATIONS)); \ + printf("%4u", (unsigned)div_round(total_time2, TEST_ITERATIONS)); \ +} while (0) + +/* Run memcpy() tests for each cached/uncached permutation. */ +#define ALL_PERF_TESTS_FOR_SIZE(n) do { \ + if (__builtin_constant_p(n)) \ + printf("\nC%6u ", (unsigned)n); \ + else \ + printf("\n%7u ", (unsigned)n); \ + SINGLE_PERF_TEST(small_buf_write, 1, small_buf_read, 1, n); \ + SINGLE_PERF_TEST(large_buf_write, 0, small_buf_read, 1, n); \ + SINGLE_PERF_TEST(small_buf_write, 1, large_buf_read, 0, n); \ + SINGLE_PERF_TEST(large_buf_write, 0, large_buf_read, 0, n); \ +} while (0) + +/* + * Run performance tests for a number of different sizes and cached/uncached + * permutations. 
+ */ +static int +perf_test(void) +{ + const unsigned num_buf_sizes = sizeof(buf_sizes) / sizeof(buf_sizes[0]); + unsigned i; + int ret; + + ret = init_buffers(); + if (ret != 0) + return ret; + +#if TEST_VALUE_RANGE != 0 + /* Setup buf_sizes array, if required */ + for (i = 0; i < TEST_VALUE_RANGE; i++) + buf_sizes[i] = i; +#endif + + /* See function comment */ + do_uncached_write(large_buf_write, 0, small_buf_read, 1, SMALL_BUFFER_SIZE); + + printf("\n** rte_memcpy()/memcpy performance tests **\n" + "======= ============== ============== ============== ==============\n" + " Size Cache to cache Cache to mem Mem to cache Mem to mem\n" + "(bytes) (ticks) (ticks) (ticks) (ticks)\n" + "------- -------------- -------------- -------------- --------------"); + + /* Do tests where size is a variable */ + for (i = 0; i < num_buf_sizes; i++) { + ALL_PERF_TESTS_FOR_SIZE((size_t)buf_sizes[i]); + } + +#ifdef RTE_MEMCPY_BUILTIN_CONSTANT_P + /* Do tests where size is a compile-time constant */ + ALL_PERF_TESTS_FOR_SIZE(63U); + ALL_PERF_TESTS_FOR_SIZE(64U); + ALL_PERF_TESTS_FOR_SIZE(65U); + ALL_PERF_TESTS_FOR_SIZE(255U); + ALL_PERF_TESTS_FOR_SIZE(256U); + ALL_PERF_TESTS_FOR_SIZE(257U); + ALL_PERF_TESTS_FOR_SIZE(1023U); + ALL_PERF_TESTS_FOR_SIZE(1024U); + ALL_PERF_TESTS_FOR_SIZE(1025U); + ALL_PERF_TESTS_FOR_SIZE(1518U); +#endif + printf("\n======= ============== ============== ============== ==============\n\n"); + + free_buffers(); + + return 0; +} + +/* Structure with base memcpy func pointer, and number of bytes it copies */ +struct base_memcpy_func { + void (*func)(uint8_t *dst, const uint8_t *src); + unsigned size; +}; + +/* To create base_memcpy_func structure entries */ +#define BASE_FUNC(n) {rte_mov##n, n} + +/* Max number of bytes that can be copies with a "base" memcpy functions */ +#define MAX_BASE_FUNC_SIZE 256 + +/* + * Test the "base" memcpy functions, that a copy fixed number of bytes. + */ +static int +base_func_test(void) +{ + const struct base_memcpy_func base_memcpy_funcs[6] = { + BASE_FUNC(16), + BASE_FUNC(32), + BASE_FUNC(48), + BASE_FUNC(64), + BASE_FUNC(128), + BASE_FUNC(256), + }; + unsigned i, j; + unsigned num_funcs = sizeof(base_memcpy_funcs) / sizeof(base_memcpy_funcs[0]); + uint8_t dst[MAX_BASE_FUNC_SIZE]; + uint8_t src[MAX_BASE_FUNC_SIZE]; + + for (i = 0; i < num_funcs; i++) { + unsigned size = base_memcpy_funcs[i].size; + for (j = 0; j < size; j++) { + dst[j] = 0; + src[j] = (uint8_t) rte_rand(); + } + base_memcpy_funcs[i].func(dst, src); + for (j = 0; j < size; j++) + if (dst[j] != src[j]) + return -1; + } + + return 0; +} + +/* + * Create two buffers, and initialise one with random values. These are copied + * to the second buffer and then compared to see if the copy was successful. + * The bytes outside the copied area are also checked to make sure they were not + * changed. 
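/*
 * [Editorial sketch - not part of the original patch.]
 * The three checks test_single_memcpy() performs, shown with the standard
 * memcpy(): bytes before the destination offset stay zero, the copied range
 * matches the source, and bytes after the copied range stay zero. The caller
 * must keep off + size within the 256-byte buffers used here.
 */
#include <string.h>

static int verify_copy(unsigned off_dst, unsigned off_src, size_t size)
{
	unsigned char src[256], dst[256] = {0};
	unsigned i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (unsigned char)(i + 1);

	memcpy(dst + off_dst, src + off_src, size);

	for (i = 0; i < off_dst; i++)			/* untouched prefix */
		if (dst[i] != 0)
			return -1;
	for (i = 0; i < size; i++)			/* copied payload */
		if (dst[off_dst + i] != src[off_src + i])
			return -1;
	for (i = off_dst + size; i < sizeof(dst); i++)	/* untouched suffix */
		if (dst[i] != 0)
			return -1;
	return 0;
}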
+ */ +static int +test_single_memcpy(unsigned int off_src, unsigned int off_dst, size_t size) +{ + unsigned int i; + uint8_t dest[SMALL_BUFFER_SIZE + ALIGNMENT_UNIT]; + uint8_t src[SMALL_BUFFER_SIZE + ALIGNMENT_UNIT]; + + /* Setup buffers */ + for (i = 0; i < SMALL_BUFFER_SIZE + ALIGNMENT_UNIT; i++) { + dest[i] = 0; + src[i] = (uint8_t) rte_rand(); + } + + /* Do the copy */ + rte_memcpy(dest + off_dst, src + off_src, size); + + /* Check nothing before offset is affected */ + for (i = 0; i < off_dst; i++) { + if (dest[i] != 0) { + printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): " + "[modified before start of dst].\n", + (unsigned)size, off_src, off_dst); + return -1; + } + } + + /* Check everything was copied */ + for (i = 0; i < size; i++) { + if (dest[i + off_dst] != src[i + off_src]) { + printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): " + "[didn't copy byte %u].\n", + (unsigned)size, off_src, off_dst, i); + return -1; + } + } + + /* Check nothing after copy was affected */ + for (i = size; i < SMALL_BUFFER_SIZE; i++) { + if (dest[i + off_dst] != 0) { + printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): " + "[copied too many].\n", + (unsigned)size, off_src, off_dst); + return -1; + } + } + return 0; +} + +/* + * Check functionality for various buffer sizes and data offsets/alignments. + */ +static int +func_test(void) +{ + unsigned int off_src, off_dst, i; + unsigned int num_buf_sizes = sizeof(buf_sizes) / sizeof(buf_sizes[0]); + int ret; + + for (off_src = 0; off_src < ALIGNMENT_UNIT; off_src++) { + for (off_dst = 0; off_dst < ALIGNMENT_UNIT; off_dst++) { + for (i = 0; i < num_buf_sizes; i++) { + ret = test_single_memcpy(off_src, off_dst, + buf_sizes[i]); + if (ret != 0) + return -1; + } + } + } + return 0; +} + +int +test_memcpy(void) +{ + int ret; + + ret = func_test(); + if (ret != 0) + return -1; + ret = base_func_test(); + if (ret != 0) + return -1; + ret = perf_test(); + if (ret != 0) + return -1; + return 0; +} diff --git a/app/test/test_memory.c b/app/test/test_memory.c new file mode 100644 index 0000000000..8a25eca29d --- /dev/null +++ b/app/test/test_memory.c @@ -0,0 +1,92 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include + +#include + +#include +#include + +#include "test.h" + +/* + * Memory + * ====== + * + * - Dump the mapped memory. The python-expect script checks that at + * least one line is dumped. + * + * - Check that memory size is different than 0. + * + * - Try to read all memory; it should not segfault. + */ + +int +test_memory(void) +{ + uint64_t s; + unsigned i, j; + const struct rte_memseg *mem; + volatile uint8_t x; + + /* + * dump the mapped memory: the python-expect script checks + * that at least one line is dumped + */ + printf("Dump memory layout\n"); + rte_dump_physmem_layout(); + + /* check that memory size is != 0 */ + s = rte_eal_get_physmem_size(); + if (s == 0) { + printf("No memory detected\n"); + return -1; + } + + /* try to read memory (should not segfault) */ + mem = rte_eal_get_physmem_layout(); + for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) { + + /* check memory */ + for (j = 0; j +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "test.h" + +/* + * Mempool + * ======= + * + * #. Basic tests: done on one core with and without cache: + * + * - Get one object, put one object + * - Get two objects, put two objects + * - Get all objects, test that their content is not modified and + * put them back in the pool. + * + * #. Performance tests: + * + * Each core get *n_keep* objects per bulk of *n_get_bulk*. Then, + * objects are put back in the pool per bulk of *n_put_bulk*. + * + * This sequence is done during TIME_S seconds. + * + * This test is done on the following configurations: + * + * - Cores configuration (*cores*) + * + * - One core with cache + * - Two cores with cache + * - Max. cores with cache + * - One core without cache + * - Two cores without cache + * - Max. 
cores without cache + * + * - Bulk size (*n_get_bulk*, *n_put_bulk*) + * + * - Bulk get from 1 to 32 + * - Bulk put from 1 to 32 + * + * - Number of kept objects (*n_keep*) + * + * - 32 + * - 128 + */ + +#define N 65536 +#define TIME_S 5 +#define MEMPOOL_ELT_SIZE 2048 +#define MAX_KEEP 128 +#define MEMPOOL_SIZE ((RTE_MAX_LCORE*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1) + +static struct rte_mempool *mp; +static struct rte_mempool *mp_cache, *mp_nocache; + +static rte_atomic32_t synchro; + +/* number of objects in one bulk operation (get or put) */ +static unsigned n_get_bulk; +static unsigned n_put_bulk; + +/* number of objects retrived from mempool before putting them back */ +static unsigned n_keep; + +/* number of enqueues / dequeues */ +struct mempool_test_stats { + unsigned enq_count; +} __rte_cache_aligned; + +static struct mempool_test_stats stats[RTE_MAX_LCORE]; + +static int +per_lcore_mempool_test(__attribute__((unused)) void *arg) +{ + void *obj_table[MAX_KEEP]; + unsigned i, idx; + unsigned lcore_id = rte_lcore_id(); + int ret; + uint64_t start_cycles, end_cycles; + uint64_t time_diff = 0, hz = rte_get_hpet_hz(); + + /* n_get_bulk and n_put_bulk must be divisors of n_keep */ + if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep) + return -1; + if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep) + return -1; + + stats[lcore_id].enq_count = 0; + + /* wait synchro for slaves */ + if (lcore_id != rte_get_master_lcore()) + while (rte_atomic32_read(&synchro) == 0); + + start_cycles = rte_get_hpet_cycles(); + + while (time_diff/hz < TIME_S) { + for (i = 0; likely(i < (N/n_keep)); i++) { + /* get n_keep objects by bulk of n_bulk */ + idx = 0; + while (idx < n_keep) { + ret = rte_mempool_get_bulk(mp, &obj_table[idx], + n_get_bulk); + if (unlikely(ret < 0)) { + rte_mempool_dump(mp); + rte_ring_dump(mp->ring); + /* in this case, objects are lost... 
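/*
 * [Editorial sketch - not part of the original patch.]
 * The core of the per-lcore loop shown above: grab n_keep objects from the
 * pool in bursts of n_get, then return them in bursts of n_put. Both burst
 * sizes must divide n_keep, which the test checks up front.
 */
static int bulk_cycle(struct rte_mempool *pool, void **table,
		unsigned n_keep, unsigned n_get, unsigned n_put)
{
	unsigned idx;

	for (idx = 0; idx < n_keep; idx += n_get)
		if (rte_mempool_get_bulk(pool, &table[idx], n_get) < 0)
			return -1;		/* pool temporarily exhausted */

	for (idx = 0; idx < n_keep; idx += n_put)
		rte_mempool_put_bulk(pool, &table[idx], n_put);

	return 0;
}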
*/ + return -1; + } + idx += n_get_bulk; + } + + /* put the objects back */ + idx = 0; + while (idx < n_keep) { + rte_mempool_put_bulk(mp, &obj_table[idx], + n_put_bulk); + idx += n_put_bulk; + } + } + end_cycles = rte_get_hpet_cycles(); + time_diff = end_cycles - start_cycles; + stats[lcore_id].enq_count += N; + } + + return 0; +} + +/* launch all the per-lcore test, and display the result */ +static int +launch_cores(unsigned cores) +{ + unsigned lcore_id; + unsigned rate; + int ret; + unsigned cores_save = cores; + + rte_atomic32_set(&synchro, 0); + + /* reset stats */ + memset(stats, 0, sizeof(stats)); + + printf("mempool_autotest cache=%u cores=%u n_get_bulk=%u " + "n_put_bulk=%u n_keep=%u ", + (unsigned) mp->cache_size, cores, n_get_bulk, n_put_bulk, n_keep); + + if (rte_mempool_count(mp) != MEMPOOL_SIZE) { + printf("mempool is not full\n"); + return -1; + } + + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (cores == 1) + break; + cores--; + rte_eal_remote_launch(per_lcore_mempool_test, + NULL, lcore_id); + } + + /* start synchro and launch test on master */ + rte_atomic32_set(&synchro, 1); + + ret = per_lcore_mempool_test(NULL); + + cores = cores_save; + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (cores == 1) + break; + cores--; + if (rte_eal_wait_lcore(lcore_id) < 0) + ret = -1; + } + + if (ret < 0) { + printf("per-lcore test returned -1\n"); + return -1; + } + + rate = 0; + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) + rate += (stats[lcore_id].enq_count / TIME_S); + + printf("rate_persec=%u\n", rate); + + return 0; +} + +/* for a given number of core, launch all test cases */ +static int +do_one_mempool_test(unsigned cores) +{ + unsigned bulk_tab_get[] = { 1, 4, 32, 0 }; + unsigned bulk_tab_put[] = { 1, 4, 32, 0 }; + unsigned keep_tab[] = { 32, 128, 0 }; + unsigned *get_bulk_ptr; + unsigned *put_bulk_ptr; + unsigned *keep_ptr; + int ret; + + for (get_bulk_ptr = bulk_tab_get; *get_bulk_ptr; get_bulk_ptr++) { + for (put_bulk_ptr = bulk_tab_put; *put_bulk_ptr; put_bulk_ptr++) { + for (keep_ptr = keep_tab; *keep_ptr; keep_ptr++) { + + n_get_bulk = *get_bulk_ptr; + n_put_bulk = *put_bulk_ptr; + n_keep = *keep_ptr; + ret = launch_cores(cores); + + if (ret < 0) + return -1; + } + } + } + return 0; +} + + +/* + * save the object number in the first 4 bytes of object data. All + * other bytes are set to 0. 
+ */ +static void +my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg, + void *obj, unsigned i) +{ + uint32_t *objnum = obj; + memset(obj, 0, mp->elt_size); + *objnum = i; +} + +/* basic tests (done on one core) */ +static int +test_mempool_basic(void) +{ + uint32_t *objnum; + void **objtable; + void *obj, *obj2; + char *obj_data; + int ret = 0; + unsigned i, j; + unsigned old_bulk_count; + + /* dump the mempool status */ + rte_mempool_dump(mp); + old_bulk_count = rte_mempool_get_bulk_count(mp); + rte_mempool_dump(mp); + if (rte_mempool_set_bulk_count(mp, 0) == 0) + return -1; + if (rte_mempool_get_bulk_count(mp) == 0) + return -1; + if (rte_mempool_set_bulk_count(mp, 2) < 0) + return -1; + if (rte_mempool_get_bulk_count(mp) != 2) + return -1; + rte_mempool_dump(mp); + if (rte_mempool_set_bulk_count(mp, old_bulk_count) < 0) + return -1; + if (rte_mempool_get_bulk_count(mp) != old_bulk_count) + return -1; + rte_mempool_dump(mp); + + printf("get an object\n"); + if (rte_mempool_get(mp, &obj) < 0) + return -1; + rte_mempool_dump(mp); + + printf("put the object back\n"); + rte_mempool_put(mp, obj); + rte_mempool_dump(mp); + + printf("get 2 objects\n"); + if (rte_mempool_get(mp, &obj) < 0) + return -1; + if (rte_mempool_get(mp, &obj2) < 0) { + rte_mempool_put(mp, obj); + return -1; + } + rte_mempool_dump(mp); + + printf("put the objects back\n"); + rte_mempool_put(mp, obj); + rte_mempool_put(mp, obj2); + rte_mempool_dump(mp); + + /* + * get many objects: we cannot get them all because the cache + * on other cores may not be empty. + */ + objtable = malloc(MEMPOOL_SIZE * sizeof(void *)); + if (objtable == NULL) { + return -1; + } + + for (i=0; i MEMPOOL_SIZE) { + printf("bad object number\n"); + ret = -1; + break; + } + for (j=sizeof(*objnum); jelt_size; j++) { + if (obj_data[j] != 0) + ret = -1; + } + + rte_mempool_put(mp, objtable[i]); + } + + free(objtable); + if (ret == -1) + printf("objects were modified!\n"); + + return ret; +} + +static int test_mempool_creation_with_exceeded_cache_size(void) +{ + struct rte_mempool *mp_cov; + + mp_cov = rte_mempool_create("test_mempool_creation_with_exceeded_cache_size", MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, + RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0, + NULL, NULL, + my_obj_init, NULL, + SOCKET_ID_ANY, 0); + if(NULL != mp_cov) { + return -1; + } + + return 0; +} + +static struct rte_mempool *mp_spsc; +static rte_spinlock_t scsp_spinlock; +static void *scsp_obj_table[MAX_KEEP]; + +/* + * single producer function + */ +static int test_mempool_single_producer(void) +{ + unsigned int i; + void *obj = NULL; + uint64_t start_cycles, end_cycles; + uint64_t duration = rte_get_hpet_hz() * 8; + + start_cycles = rte_get_hpet_cycles(); + while (1) { + end_cycles = rte_get_hpet_cycles(); + /* duration uses up, stop producing */ + if (start_cycles + duration < end_cycles) + break; + rte_spinlock_lock(&scsp_spinlock); + for (i = 0; i < MAX_KEEP; i ++) { + if (NULL != scsp_obj_table[i]) + obj = scsp_obj_table[i]; + break; + } + rte_spinlock_unlock(&scsp_spinlock); + if (i >= MAX_KEEP) { + continue; + } + if (rte_mempool_from_obj(obj) != mp_spsc) { + printf("test_mempool_single_producer there is an obj not owned by this mempool\n"); + return -1; + } + rte_mempool_sp_put(mp_spsc, obj); + rte_spinlock_lock(&scsp_spinlock); + scsp_obj_table[i] = NULL; + rte_spinlock_unlock(&scsp_spinlock); + } + + return 0; +} + +/* + * single consumer function + */ +static int test_mempool_single_consumer(void) +{ + unsigned int i; + void * obj; + uint64_t start_cycles, 
end_cycles; + uint64_t duration = rte_get_hpet_hz() * 5; + + start_cycles = rte_get_hpet_cycles(); + while (1) { + end_cycles = rte_get_hpet_cycles(); + /* duration uses up, stop consuming */ + if (start_cycles + duration < end_cycles) + break; + rte_spinlock_lock(&scsp_spinlock); + for (i = 0; i < MAX_KEEP; i ++) { + if (NULL == scsp_obj_table[i]) + break; + } + rte_spinlock_unlock(&scsp_spinlock); + if (i >= MAX_KEEP) + continue; + if (rte_mempool_sc_get(mp_spsc, &obj) < 0) + break; + rte_spinlock_lock(&scsp_spinlock); + scsp_obj_table[i] = obj; + rte_spinlock_unlock(&scsp_spinlock); + } + + return 0; +} + +/* + * test function for mempool test based on singple consumer and single producer, can run on one lcore only + */ +static int test_mempool_launch_single_consumer(__attribute__((unused)) void *arg) +{ + return test_mempool_single_consumer(); +} + +static void my_mp_init(struct rte_mempool * mp, __attribute__((unused)) void * arg) +{ + printf("mempool name is %s\n", mp->name); + /* nothing to be implemented here*/ + return ; +} + +/* + * it tests the mempool operations based on singple producer and single consumer + */ +static int +test_mempool_sp_sc(void) +{ + int ret = 0; + unsigned lcore_id = rte_lcore_id(); + unsigned lcore_next; + + /* create a mempool with single producer/consumer ring */ + if (NULL == mp_spsc) { + mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, 0, 0, + my_mp_init, NULL, + my_obj_init, NULL, + SOCKET_ID_ANY, MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET); + if (NULL == mp_spsc) { + return -1; + } + } + if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) { + printf("Cannot lookup mempool from its name\n"); + return -1; + } + lcore_next = rte_get_next_lcore(lcore_id, 0, 1); + if (RTE_MAX_LCORE <= lcore_next) + return -1; + if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) + return -1; + rte_spinlock_init(&scsp_spinlock); + memset(scsp_obj_table, 0, sizeof(scsp_obj_table)); + rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL, lcore_next); + if(test_mempool_single_producer() < 0) + ret = -1; + + if(rte_eal_wait_lcore(lcore_next) < 0) + ret = -1; + + return ret; +} + +/* + * it tests some more basic of mempool + */ +static int +test_mempool_basic_ex(struct rte_mempool * mp) +{ + unsigned i; + void **obj; + void *err_obj; + int ret = -1; + + if (mp == NULL) + return ret; + + obj = (void **)rte_zmalloc("test_mempool_basic_ex", (MEMPOOL_SIZE * sizeof(void *)), 0); + if (obj == NULL) { + printf("test_mempool_basic_ex fail to rte_malloc\n"); + return ret; + } + printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n", mp->name, rte_mempool_free_count(mp)); + if (rte_mempool_full(mp) != 1) { + printf("test_mempool_basic_ex the mempool is not full but it should be\n"); + goto fail_mp_basic_ex; + } + + for (i = 0; i < MEMPOOL_SIZE; i ++) { + if (rte_mempool_mc_get(mp, &obj[i]) < 0) { + printf("fail_mp_basic_ex fail to get mempool object for [%u]\n", i); + goto fail_mp_basic_ex; + } + } + if (rte_mempool_mc_get(mp, &err_obj) == 0) { + printf("test_mempool_basic_ex get an impossible obj from mempool\n"); + goto fail_mp_basic_ex; + } + printf("number: %u\n", i); + if (rte_mempool_empty(mp) != 1) { + printf("test_mempool_basic_ex the mempool is not empty but it should be\n"); + goto fail_mp_basic_ex; + } + + for (i = 0; i < MEMPOOL_SIZE; i ++) { + rte_mempool_mp_put(mp, obj[i]); + } + if (rte_mempool_full(mp) != 1) { + printf("test_mempool_basic_ex the mempool is not full but it should be\n"); + 
goto fail_mp_basic_ex; + } + + ret = 0; + +fail_mp_basic_ex: + if (obj != NULL) + rte_free((void *)obj); + + return ret; +} + +static int +test_mempool_same_name_twice_creation(void) +{ + struct rte_mempool *mp_tc; + + mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, 0, 0, + NULL, NULL, + NULL, NULL, + SOCKET_ID_ANY, 0); + if (NULL == mp_tc) + return -1; + + mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, 0, 0, + NULL, NULL, + NULL, NULL, + SOCKET_ID_ANY, 0); + if (NULL != mp_tc) + return -1; + + return 0; +} + +int +test_mempool(void) +{ + rte_atomic32_init(&synchro); + + /* create a mempool (without cache) */ + if (mp_nocache == NULL) + mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, 0, 0, + NULL, NULL, + my_obj_init, NULL, + SOCKET_ID_ANY, 0); + if (mp_nocache == NULL) + return -1; + + /* create a mempool (with cache) */ + if (mp_cache == NULL) + mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE, + MEMPOOL_ELT_SIZE, + RTE_MEMPOOL_CACHE_MAX_SIZE, 0, + NULL, NULL, + my_obj_init, NULL, + SOCKET_ID_ANY, 0); + if (mp_cache == NULL) + return -1; + + + /* retrieve the mempool from its name */ + if (rte_mempool_lookup("test_nocache") != mp_nocache) { + printf("Cannot lookup mempool from its name\n"); + return -1; + } + + rte_mempool_list_dump(); + + /* basic tests without cache */ + mp = mp_nocache; + if (test_mempool_basic() < 0) + return -1; + + /* basic tests with cache */ + mp = mp_cache; + if (test_mempool_basic() < 0) + return -1; + + /* more basic tests without cache */ + if (test_mempool_basic_ex(mp_nocache) < 0) + return -1; + + /* performance test with 1, 2 and max cores */ + printf("start performance test (without cache)\n"); + mp = mp_nocache; + + if (do_one_mempool_test(1) < 0) + return -1; + + if (do_one_mempool_test(2) < 0) + return -1; + + if (do_one_mempool_test(rte_lcore_count()) < 0) + return -1; + + /* performance test with 1, 2 and max cores */ + printf("start performance test (with cache)\n"); + mp = mp_cache; + + if (do_one_mempool_test(1) < 0) + return -1; + + if (do_one_mempool_test(2) < 0) + return -1; + + if (do_one_mempool_test(rte_lcore_count()) < 0) + return -1; + + /* mempool operation test based on single producer and single comsumer */ + if (test_mempool_sp_sc() < 0) + return -1; + + if (test_mempool_creation_with_exceeded_cache_size() < 0) + return -1; + + if (test_mempool_same_name_twice_creation() < 0) + return -1; + + rte_mempool_list_dump(); + + return 0; +} diff --git a/app/test/test_memzone.c b/app/test/test_memzone.c new file mode 100644 index 0000000000..dd6621129f --- /dev/null +++ b/app/test/test_memzone.c @@ -0,0 +1,639 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
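Before the memzone tests begin, it is worth condensing what the test_mempool() driver above exercises: a cache-less pool and a cached pool are created, retrieved again by name, and a second creation under an already-used name is expected to fail. A minimal sketch of that usage, with illustrative demo_* names:

#include <stdio.h>
#include <rte_memory.h>
#include <rte_mempool.h>

static int
demo_mempool_usage(void)
{
    struct rte_mempool *nocache, *cached, *dup;

    /* cache_size == 0: no per-lcore object cache */
    nocache = rte_mempool_create("demo_nocache", 1024, 64, 0, 0,
            NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);

    /* maximum per-lcore cache, as used for the "test_cache" pool above */
    cached = rte_mempool_create("demo_cache", 1024, 64,
            RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
            NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
    if (nocache == NULL || cached == NULL)
        return -1;

    /* pools are registered by name and can be found again later */
    if (rte_mempool_lookup("demo_nocache") != nocache)
        return -1;

    /* re-using an existing name is expected to fail */
    dup = rte_mempool_create("demo_nocache", 1024, 64, 0, 0,
            NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0);
    if (dup != NULL)
        return -1;

    printf("demo_nocache starts with %u free entries\n",
            rte_mempool_free_count(nocache));
    return 0;
}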
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include "test.h" + +/* + * Memzone + * ======= + * + * - Search for three reserved zones or reserve them if they do not exist: + * + * - One is on any socket id. + * - The second is on socket 0. + * - The last one is on socket 1 (if socket 1 exists). + * + * - Check that the zones exist. + * + * - Check that the zones are cache-aligned. + * + * - Check that zones do not overlap. + * + * - Check that the zones are on the correct socket id. + * + * - Check that a lookup of the first zone returns the same pointer. + * + * - Check that it is not possible to create another zone with the + * same name as an existing zone. + * + * - Check flags for specific huge page size reservation + */ + +/* Test if memory overlaps: return 1 if true, or 0 if false. 
*/ +static int +is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2) +{ + if (ptr2 >= ptr1 && (ptr2 - ptr1) < len1) + return 1; + else if (ptr2 < ptr1 && (ptr1 - ptr2) < len2) + return 1; + return 0; +} + +static int +test_memzone_invalid_alignment(void) +{ + const struct rte_memzone * mz; + + mz = rte_memzone_lookup("invalid_alignment"); + if (mz != NULL) { + printf("Zone with invalid alignment has been reserved\n"); + return -1; + } + + mz = rte_memzone_reserve_aligned("invalid_alignment", 100, + SOCKET_ID_ANY, 0, 100); + if (mz != NULL) { + printf("Zone with invalid alignment has been reserved\n"); + return -1; + } + return 0; +} + +static int +test_memzone_reserving_zone_size_bigger_than_the_maximum(void) +{ + const struct rte_memzone * mz; + + mz = rte_memzone_lookup("zone_size_bigger_than_the_maximum"); + if (mz != NULL) { + printf("zone_size_bigger_than_the_maximum has been reserved\n"); + return -1; + } + + mz = rte_memzone_reserve("zone_size_bigger_than_the_maximum", 0x1900000000ULL, + SOCKET_ID_ANY, 0); + if (mz != NULL) { + printf("It is impossible to reserve such big a memzone\n"); + return -1; + } + + return 0; +} + +static int +test_memzone_reserve_flags(void) +{ + const struct rte_memzone *mz; + const struct rte_memseg *ms; + int hugepage_2MB_avail = 0; + int hugepage_1GB_avail = 0; + const int size = 100; + int i = 0; + ms = rte_eal_get_physmem_layout(); + for (i = 0; i < RTE_MAX_MEMSEG; i++) { + if (ms[i].hugepage_sz == RTE_PGSIZE_2M) + hugepage_2MB_avail = 1; + if (ms[i].hugepage_sz == RTE_PGSIZE_1G) + hugepage_1GB_avail = 1; + } + /* Display the availability of 2MB and 1GB pages */ + if (hugepage_2MB_avail) + printf("2MB Huge pages available\n"); + if (hugepage_1GB_avail) + printf("1GB Huge pages available\n"); + /* + * If 2MB pages available, check that a small memzone is correctly + * reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB flag. + * Also check that RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an + * available page size (i.e 1GB ) when 2MB pages are unavailable. 
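A minimal sketch of the pattern this flag test is built around: scan the physical memory layout reported by the EAL for a given hugepage size, then reserve a zone with the matching flag, optionally softened by RTE_MEMZONE_SIZE_HINT_ONLY so the page size becomes a preference rather than a hard requirement (the zone name below is illustrative):

#include <stdio.h>
#include <inttypes.h>
#include <rte_memory.h>
#include <rte_memzone.h>

static int
demo_reserve_on_2m_pages(void)
{
    const struct rte_memseg *ms = rte_eal_get_physmem_layout();
    const struct rte_memzone *mz;
    int have_2m = 0;
    int i;

    /* check whether any memory segment is backed by 2MB pages */
    for (i = 0; i < RTE_MAX_MEMSEG; i++)
        if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
            have_2m = 1;
    if (!have_2m)
        return 0;   /* nothing to demonstrate on this machine */

    /* RTE_MEMZONE_2MB alone fails when no 2MB page can satisfy the
     * request; with the hint flag the allocator may fall back to
     * whatever page size is available. */
    mz = rte_memzone_reserve("demo_2m_zone", 100, SOCKET_ID_ANY,
            RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
    if (mz == NULL)
        return -1;

    printf("zone backed by %" PRIu64 " byte pages\n", mz->hugepage_sz);
    return 0;
}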
+ */ + if (hugepage_2MB_avail) { + mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY, + RTE_MEMZONE_2MB); + if (mz == NULL) { + printf("MEMZONE FLAG 2MB\n"); + return -1; + } + if (mz->hugepage_sz != RTE_PGSIZE_2M) { + printf("hugepage_sz not equal 2M\n"); + return -1; + } + + mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY, + RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL) { + printf("MEMZONE FLAG 2MB\n"); + return -1; + } + if (mz->hugepage_sz != RTE_PGSIZE_2M) { + printf("hugepage_sz not equal 2M\n"); + return -1; + } + + /* Check if 1GB huge pages are unavailable, that function fails unless + * HINT flag is indicated + */ + if (!hugepage_1GB_avail) { + mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY, + RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL) { + printf("MEMZONE FLAG 1GB & HINT\n"); + return -1; + } + if (mz->hugepage_sz != RTE_PGSIZE_2M) { + printf("hugepage_sz not equal 2M\n"); + return -1; + } + + mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY, + RTE_MEMZONE_1GB); + if (mz != NULL) { + printf("MEMZONE FLAG 1GB\n"); + return -1; + } + } + } + + /*As with 2MB tests above for 1GB huge page requests*/ + if (hugepage_1GB_avail) { + mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY, + RTE_MEMZONE_1GB); + if (mz == NULL) { + printf("MEMZONE FLAG 1GB\n"); + return -1; + } + if (mz->hugepage_sz != RTE_PGSIZE_1G) { + printf("hugepage_sz not equal 1G\n"); + return -1; + } + + mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY, + RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL) { + printf("MEMZONE FLAG 1GB\n"); + return -1; + } + if (mz->hugepage_sz != RTE_PGSIZE_1G) { + printf("hugepage_sz not equal 1G\n"); + return -1; + } + + /* Check if 1GB huge pages are unavailable, that function fails unless + * HINT flag is indicated + */ + if (!hugepage_2MB_avail) { + mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY, + RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL){ + printf("MEMZONE FLAG 2MB & HINT\n"); + return -1; + } + if (mz->hugepage_sz != RTE_PGSIZE_1G) { + printf("hugepage_sz not equal 1G\n"); + return -1; + } + mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY, + RTE_MEMZONE_2MB); + if (mz != NULL) { + printf("MEMZONE FLAG 2MB\n"); + return -1; + } + } + + if (hugepage_2MB_avail && hugepage_1GB_avail) { + mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY, + RTE_MEMZONE_2MB|RTE_MEMZONE_1GB); + if (mz != NULL) { + printf("BOTH SIZES SET\n"); + return -1; + } + } + } + return 0; +} + +static int +test_memzone_reserve_max(void) +{ + const struct rte_memzone *mz; + const struct rte_config *config; + const struct rte_memseg *ms; + int memseg_idx = 0; + int memzone_idx = 0; + uint64_t len = 0; + void* last_addr; + uint64_t maxlen = 0; + + /* get pointer to global configuration */ + config = rte_eal_get_configuration(); + + ms = rte_eal_get_physmem_layout(); + + for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){ + /* ignore smaller memsegs as they can only get smaller */ + if (ms[memseg_idx].len < maxlen) + continue; + + len = ms[memseg_idx].len; + last_addr = ms[memseg_idx].addr; + + /* cycle through all memzones */ + for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) { + + /* stop when reaching last allocated memzone */ + if (config->mem_config->memzone[memzone_idx].addr == NULL) + break; + + /* check if the memzone is in our memseg and subtract length */ + if 
((config->mem_config->memzone[memzone_idx].addr >= + ms[memseg_idx].addr) && + (config->mem_config->memzone[memzone_idx].addr <= + (RTE_PTR_ADD(ms[memseg_idx].addr, + (size_t)ms[memseg_idx].len)))) { + /* since the zones can now be aligned and occasionally skip + * some space, we should calculate the length based on + * reported length and start addresses difference. Addresses + * are allocated sequentially so we don't need to worry about + * them being in the right order. + */ + len -= (uintptr_t) RTE_PTR_SUB( + config->mem_config->memzone[memzone_idx].addr, + (uintptr_t) last_addr); + len -= config->mem_config->memzone[memzone_idx].len; + last_addr = + RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr, + (size_t) config->mem_config->memzone[memzone_idx].len); + } + } + + /* we don't need to calculate offset here since length + * is always cache-aligned */ + if (len > maxlen) + maxlen = len; + } + + mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0); + if (mz == NULL){ + printf("Failed to reserve a big chunk of memory\n"); + rte_dump_physmem_layout(); + rte_memzone_dump(); + return -1; + } + + if (mz->len != maxlen) { + printf("Memzone reserve with 0 size did not return bigest block\n"); + printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n", + maxlen, mz->len); + rte_dump_physmem_layout(); + rte_memzone_dump(); + + return -1; + } + return 0; +} + +static int +test_memzone_reserve_max_aligned(void) +{ + const struct rte_memzone *mz; + const struct rte_config *config; + const struct rte_memseg *ms; + int memseg_idx = 0; + int memzone_idx = 0; + uint64_t addr_offset, len = 0; + void* last_addr; + uint64_t maxlen = 0; + + /* get pointer to global configuration */ + config = rte_eal_get_configuration(); + + ms = rte_eal_get_physmem_layout(); + + addr_offset = 0; + + for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){ + + /* ignore smaller memsegs as they can only get smaller */ + if (ms[memseg_idx].len < maxlen) + continue; + + len = ms[memseg_idx].len; + last_addr = ms[memseg_idx].addr; + + /* cycle through all memzones */ + for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) { + + /* stop when reaching last allocated memzone */ + if (config->mem_config->memzone[memzone_idx].addr == NULL) + break; + + /* check if the memzone is in our memseg and subtract length */ + if ((config->mem_config->memzone[memzone_idx].addr >= + ms[memseg_idx].addr) && + (config->mem_config->memzone[memzone_idx].addr <= + (RTE_PTR_ADD(ms[memseg_idx].addr, + (size_t) ms[memseg_idx].len)))) { + /* since the zones can now be aligned and occasionally skip + * some space, we should calculate the length based on + * reported length and start addresses difference. 
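test_memzone_reserve_max() walks the memseg and memzone tables only to predict what the allocator should return; the user-visible behaviour it verifies is simply that a requested length of 0 asks for the largest block still available. A minimal sketch of that call (the zone name is illustrative):

#include <stdio.h>
#include <inttypes.h>
#include <rte_memory.h>
#include <rte_memzone.h>

static int
demo_reserve_biggest(void)
{
    /* len == 0 requests the largest block the allocator can provide */
    const struct rte_memzone *mz =
        rte_memzone_reserve("demo_max_zone", 0, SOCKET_ID_ANY, 0);

    if (mz == NULL)
        return -1;
    printf("largest reservable block: %" PRIu64 " bytes\n", mz->len);
    return 0;
}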
+ */ + len -= (uintptr_t) RTE_PTR_SUB( + config->mem_config->memzone[memzone_idx].addr, + (uintptr_t) last_addr); + len -= config->mem_config->memzone[memzone_idx].len; + last_addr = + RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr, + (size_t) config->mem_config->memzone[memzone_idx].len); + } + } + + /* make sure we get the alignment offset */ + if (len > maxlen) { + addr_offset = RTE_ALIGN_CEIL((uintptr_t) last_addr, 512) - (uintptr_t) last_addr; + maxlen = len; + } + } + + maxlen -= addr_offset; + + mz = rte_memzone_reserve_aligned("max_zone_aligned", 0, + SOCKET_ID_ANY, 0, 512); + if (mz == NULL){ + printf("Failed to reserve a big chunk of memory\n"); + rte_dump_physmem_layout(); + rte_memzone_dump(); + return -1; + } + + if (mz->len != maxlen) { + printf("Memzone reserve with 0 size and alignment 512 did not return" + " bigest block\n"); + printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n", + maxlen, mz->len); + rte_dump_physmem_layout(); + rte_memzone_dump(); + + return -1; + } + return 0; +} + +static int +test_memzone_aligned(void) +{ + const struct rte_memzone *memzone_aligned_32; + const struct rte_memzone *memzone_aligned_128; + const struct rte_memzone *memzone_aligned_256; + const struct rte_memzone *memzone_aligned_512; + const struct rte_memzone *memzone_aligned_1024; + + /* memzone that should automatically be adjusted to align on 64 bytes */ + memzone_aligned_32 = rte_memzone_lookup("aligned_32"); + if (memzone_aligned_32 == NULL) + memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100, + SOCKET_ID_ANY, 0, 32); + + /* memzone that is supposed to be aligned on a 128 byte boundary */ + memzone_aligned_128 = rte_memzone_lookup("aligned_128"); + if (memzone_aligned_128 == NULL) + memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100, + SOCKET_ID_ANY, 0, 128); + + /* memzone that is supposed to be aligned on a 256 byte boundary */ + memzone_aligned_256 = rte_memzone_lookup("aligned_256"); + if (memzone_aligned_256 == NULL) + memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100, + SOCKET_ID_ANY, 0, 256); + + /* memzone that is supposed to be aligned on a 512 byte boundary */ + memzone_aligned_512 = rte_memzone_lookup("aligned_512"); + if (memzone_aligned_512 == NULL) + memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100, + SOCKET_ID_ANY, 0, 512); + + /* memzone that is supposed to be aligned on a 1024 byte boundary */ + memzone_aligned_1024 = rte_memzone_lookup("aligned_1024"); + if (memzone_aligned_1024 == NULL) + memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100, + SOCKET_ID_ANY, 0, 1024); + + printf("check alignments and lengths\n"); + if ((memzone_aligned_32->phys_addr & CACHE_LINE_MASK) != 0) + return -1; + if (((uintptr_t) memzone_aligned_32->addr & CACHE_LINE_MASK) != 0) + return -1; + if ((memzone_aligned_32->len & CACHE_LINE_MASK) != 0) + return -1; + if ((memzone_aligned_128->phys_addr & 127) != 0) + return -1; + if (((uintptr_t) memzone_aligned_128->addr & 127) != 0) + return -1; + if ((memzone_aligned_128->len & CACHE_LINE_MASK) != 0) + return -1; + if ((memzone_aligned_256->phys_addr & 255) != 0) + return -1; + if (((uintptr_t) memzone_aligned_256->addr & 255) != 0) + return -1; + if ((memzone_aligned_256->len & CACHE_LINE_MASK) != 0) + return -1; + if ((memzone_aligned_512->phys_addr & 511) != 0) + return -1; + if (((uintptr_t) memzone_aligned_512->addr & 511) != 0) + return -1; + if ((memzone_aligned_512->len & CACHE_LINE_MASK) != 0) + return -1; + if 
((memzone_aligned_1024->phys_addr & 1023) != 0) + return -1; + if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0) + return -1; + if ((memzone_aligned_1024->len & CACHE_LINE_MASK) != 0) + return -1; + + + /* check that zones don't overlap */ + printf("check overlapping\n"); + if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len, + memzone_aligned_128->phys_addr, memzone_aligned_128->len)) + return -1; + if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len, + memzone_aligned_256->phys_addr, memzone_aligned_256->len)) + return -1; + if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len, + memzone_aligned_512->phys_addr, memzone_aligned_512->len)) + return -1; + if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len, + memzone_aligned_1024->phys_addr, memzone_aligned_1024->len)) + return -1; + if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len, + memzone_aligned_256->phys_addr, memzone_aligned_256->len)) + return -1; + if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len, + memzone_aligned_512->phys_addr, memzone_aligned_512->len)) + return -1; + if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len, + memzone_aligned_1024->phys_addr, memzone_aligned_1024->len)) + return -1; + if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len, + memzone_aligned_512->phys_addr, memzone_aligned_512->len)) + return -1; + if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len, + memzone_aligned_1024->phys_addr, memzone_aligned_1024->len)) + return -1; + if (is_memory_overlap(memzone_aligned_512->phys_addr, memzone_aligned_512->len, + memzone_aligned_1024->phys_addr, memzone_aligned_1024->len)) + return -1; + return 0; +} + +int +test_memzone(void) +{ + const struct rte_memzone *memzone1; + const struct rte_memzone *memzone2; + const struct rte_memzone *memzone3; + const struct rte_memzone *mz; + + memzone1 = rte_memzone_lookup("testzone1"); + if (memzone1 == NULL) + memzone1 = rte_memzone_reserve("testzone1", 100, + SOCKET_ID_ANY, 0); + + memzone2 = rte_memzone_lookup("testzone2"); + if (memzone2 == NULL) + memzone2 = rte_memzone_reserve("testzone2", 1000, + 0, 0); + + memzone3 = rte_memzone_lookup("testzone3"); + if (memzone3 == NULL) + memzone3 = rte_memzone_reserve("testzone3", 1000, + 1, 0); + + /* memzone3 may be NULL if we don't have NUMA */ + if (memzone1 == NULL || memzone2 == NULL) + return -1; + + rte_memzone_dump(); + + /* check cache-line alignments */ + printf("check alignments and lengths\n"); + + if ((memzone1->phys_addr & CACHE_LINE_MASK) != 0) + return -1; + if ((memzone2->phys_addr & CACHE_LINE_MASK) != 0) + return -1; + if (memzone3 != NULL && (memzone3->phys_addr & CACHE_LINE_MASK) != 0) + return -1; + if ((memzone1->len & CACHE_LINE_MASK) != 0 || memzone1->len == 0) + return -1; + if ((memzone2->len & CACHE_LINE_MASK) != 0 || memzone2->len == 0) + return -1; + if (memzone3 != NULL && ((memzone3->len & CACHE_LINE_MASK) != 0 || + memzone3->len == 0)) + return -1; + + /* check that zones don't overlap */ + printf("check overlapping\n"); + + if (is_memory_overlap(memzone1->phys_addr, memzone1->len, + memzone2->phys_addr, memzone2->len)) + return -1; + if (memzone3 != NULL && + is_memory_overlap(memzone1->phys_addr, memzone1->len, + memzone3->phys_addr, memzone3->len)) + return -1; + if (memzone3 != NULL && + is_memory_overlap(memzone2->phys_addr, memzone2->len, + 
memzone3->phys_addr, memzone3->len)) + return -1; + + printf("check socket ID\n"); + + /* memzone2 must be on socket id 0 and memzone3 on socket 1 */ + if (memzone2->socket_id != 0) + return -1; + if (memzone3 != NULL && memzone3->socket_id != 1) + return -1; + + printf("test zone lookup\n"); + mz = rte_memzone_lookup("testzone1"); + if (mz != memzone1) + return -1; + + printf("test duplcate zone name\n"); + mz = rte_memzone_reserve("testzone1", 100, + SOCKET_ID_ANY, 0); + if (mz != NULL) + return -1; + + printf("test reserving memzone with bigger size than the maximum\n"); + if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0) + return -1; + + printf("test reserving the largest size memzone possible\n"); + if (test_memzone_reserve_max() < 0) + return -1; + + printf("test memzone_reserve flags\n"); + if (test_memzone_reserve_flags() < 0) + return -1; + + printf("test alignment for memzone_reserve\n"); + if (test_memzone_aligned() < 0) + return -1; + + printf("test invalid alignment for memzone_reserve\n"); + if (test_memzone_invalid_alignment() < 0) + return -1; + + printf("test reserving the largest size aligned memzone possible\n"); + if (test_memzone_reserve_max_aligned() < 0) + return -1; + + return 0; +} diff --git a/app/test/test_mp_secondary.c b/app/test/test_mp_secondary.c new file mode 100644 index 0000000000..c40a508cad --- /dev/null +++ b/app/test/test_mp_secondary.c @@ -0,0 +1,236 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
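Zooming out from test_memzone() above: every zone it reserves is expected to be cache-line aligned in both its physical and virtual address, to live on the requested socket, and to be reachable again through a name lookup. A compact sketch of those checks on a single zone (the name "demo_zone" is illustrative):

#include <stdint.h>
#include <rte_memory.h>
#include <rte_memzone.h>

static int
demo_check_zone(void)
{
    const struct rte_memzone *mz;

    /* reserve 100 bytes on socket 0; this fails if socket 0 has no memory */
    mz = rte_memzone_reserve("demo_zone", 100, 0, 0);
    if (mz == NULL)
        return -1;

    /* both the physical and the virtual address are cache-line aligned */
    if ((mz->phys_addr & CACHE_LINE_MASK) != 0)
        return -1;
    if (((uintptr_t)mz->addr & CACHE_LINE_MASK) != 0)
        return -1;

    /* the zone records the socket it was actually placed on */
    if (mz->socket_id != 0)
        return -1;

    /* looking the name up again returns the same descriptor */
    return (rte_memzone_lookup("demo_zone") == mz) ? 0 : -1;
}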
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include + +#include "test.h" + +#ifndef RTE_EXEC_ENV_BAREMETAL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "process.h" + +#define launch_proc(ARGV) process_dup(ARGV, \ + sizeof(ARGV)/(sizeof(ARGV[0])), __func__) + +/* + * This function is called in the primary i.e. main test, to spawn off secondary + * processes to run actual mp tests. Uses fork() and exec pair + */ +static int +run_secondary_instances(void) +{ + int ret = 0; + char coremask[10]; + + /* good case, using secondary */ + const char *argv1[] = { + prgname, "-c", coremask, "--proc-type=secondary" + }; + /* good case, using auto */ + const char *argv2[] = { + prgname, "-c", coremask, "--proc-type=auto" + }; + /* bad case, using invalid type */ + const char *argv3[] = { + prgname, "-c", coremask, "--proc-type=ERROR" + }; + /* bad case, using invalid file prefix */ + const char *argv4[] = { + prgname, "-c", coremask, "--proc-type=secondary", + "--file-prefix=ERROR" + }; + + rte_snprintf(coremask, sizeof(coremask), "%x", \ + (1 << rte_get_master_lcore())); + + ret |= launch_proc(argv1); + ret |= launch_proc(argv2); + + ret |= !(launch_proc(argv3)); + ret |= !(launch_proc(argv4)); + + return ret; +} + +/* + * This function is run in the secondary instance to test that creation of + * objects fails in a secondary + */ +static int +run_object_creation_tests(void) +{ + const unsigned flags = 0; + const unsigned size = 1024; + const unsigned elt_size = 64; + const unsigned cache_size = 64; + const unsigned priv_data_size = 32; + + printf("### Testing object creation - expect lots of mz reserve errors!\n"); + + rte_errno = 0; + if (rte_memzone_reserve("test_mz", size, rte_socket_id(), flags) != NULL + || rte_errno != E_RTE_SECONDARY){ + printf("Error: unexpected return value from rte_memzone_reserve\n"); + return -1; + } + printf("# Checked rte_memzone_reserve() OK\n"); + + rte_errno = 0; + if (rte_ring_create("test_rng", size, rte_socket_id(), flags) != NULL + || rte_errno != E_RTE_SECONDARY){ + printf("Error: unexpected return value from rte_ring_create()\n"); + return -1; + } + printf("# Checked rte_ring_create() OK\n"); + + + rte_errno = 0; + if (rte_mempool_create("test_mp", size, elt_size, cache_size, + priv_data_size, NULL, NULL, NULL, NULL, + rte_socket_id(), flags) != NULL + || rte_errno != E_RTE_SECONDARY){ + printf("Error: unexpected return value from rte_ring_create()\n"); + return -1; + } + printf("# Checked rte_mempool_create() OK\n"); + + const struct rte_hash_parameters hash_params = { .name = "test_mp_hash" }; + rte_errno=0; + if (rte_hash_create(&hash_params) != NULL + || rte_errno != E_RTE_SECONDARY){ + printf("Error: unexpected return value from rte_ring_create()\n"); + return -1; + } + printf("# Checked rte_hash_create() OK\n"); + + const struct rte_fbk_hash_params fbk_params = { .name = "test_mp_hash" }; + rte_errno=0; + if (rte_fbk_hash_create(&fbk_params) != NULL + || rte_errno != E_RTE_SECONDARY){ + printf("Error: unexpected return value from rte_ring_create()\n"); + return -1; + } + printf("# Checked rte_fbk_hash_create() OK\n"); + + rte_errno=0; + if (rte_lpm_create("test_lpm", size, rte_socket_id(), RTE_LPM_HEAP) != NULL + || rte_errno != E_RTE_SECONDARY){ + 
printf("Error: unexpected return value from rte_ring_create()\n"); + return -1; + } + printf("# Checked rte_lpm_create() OK\n"); + + /* Run a test_pci call */ + if (test_pci() != 0) { + printf("PCI scan failed in secondary\n"); + if (getuid() == 0) /* pci scans can fail as non-root */ + return -1; + } else + printf("PCI scan succeeded in secondary\n"); + + return 0; +} + +/* if called in a primary process, just spawns off a secondary process to + * run validation tests - which brings us right back here again... + * if called in a secondary process, this runs a series of API tests to check + * how things run in a secondary instance. + */ +int +test_mp_secondary(void) +{ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (!test_pci_run) { + printf("=== Running pre-requisite test of test_pci\n"); + test_pci(); + printf("=== Requisite test done\n"); + } + return run_secondary_instances(); + } + + printf("IN SECONDARY PROCESS\n"); + + return run_object_creation_tests(); +} + +#else + +/* Baremetal version + * Multiprocess not applicable, so just return 0 always + */ +int +test_mp_secondary(void) +{ + printf("Multi-process not applicable for baremetal\n"); + return 0; +} + +#endif diff --git a/app/test/test_pci.c b/app/test/test_pci.c new file mode 100644 index 0000000000..1c0c4ed5a3 --- /dev/null +++ b/app/test/test_pci.c @@ -0,0 +1,192 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include + +#include + +#include +#include + +#include "test.h" + +#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, + +#define TEST_BLACKLIST_NUM 0x100 + +/* + * PCI test + * ======== + * + * - Register a driver with a ``devinit()`` function. + * + * - Dump all PCI devices. + * + * - Check that the ``devinit()`` function is called at least once. 
+ */ + +int test_pci_run = 0; /* value checked by the multiprocess test */ +static unsigned pci_dev_count; +static unsigned driver_registered = 0; +static struct rte_pci_addr blacklist[TEST_BLACKLIST_NUM]; + +static int my_driver_init(struct rte_pci_driver *dr, + struct rte_pci_device *dev); + +/* + * To test cases where RTE_PCI_DRV_NEED_IGB_UIO is set, and isn't set, two + * drivers are created (one with IGB devices, the other with IXGBE devices). + */ + +/* IXGBE NICS + e1000 used for Qemu */ +#define RTE_LIBRTE_IXGBE_PMD 1 +#undef RTE_LIBRTE_IGB_PMD +struct rte_pci_id my_driver_id[] = { + +#include + +/* this device is the e1000 of qemu for testing */ +{RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x100E)}, + +{ .vendor_id = 0, /* sentinel */ }, +}; + +struct rte_pci_id my_driver_id2[] = { + +/* IGB NICS */ +#undef RTE_LIBRTE_IXGBE_PMD +#define RTE_LIBRTE_IGB_PMD 1 +#define RTE_PCI_DEV_USE_82575EB_COPPER +#include + +{ .vendor_id = 0, /* sentinel */ }, +}; + +struct rte_pci_driver my_driver = { + .name = "test_driver", + .devinit = my_driver_init, + .id_table = my_driver_id, + .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO, +}; + +struct rte_pci_driver my_driver2 = { + .name = "test_driver2", + .devinit = my_driver_init, + .id_table = my_driver_id2, + .drv_flags = 0, +}; + +static int +my_driver_init(__attribute__((unused)) struct rte_pci_driver *dr, + struct rte_pci_device *dev) +{ + printf("My driver init called in %s\n", dr->name); + printf("%x:%x:%x.%d", dev->addr.domain, dev->addr.bus, + dev->addr.devid, dev->addr.function); + printf(" - vendor:%x device:%x\n", dev->id.vendor_id, dev->id.device_id); + + pci_dev_count ++; + return 0; +} + +static void +blacklist_clear(void) +{ + rte_eal_pci_set_blacklist(NULL, 0); +} + + + +static void +blacklist_all_devices(void) +{ + struct rte_pci_device *dev = NULL; + unsigned idx = 0; + + memset(blacklist, 0, sizeof (blacklist)); + + TAILQ_FOREACH(dev, &device_list, next) { + if (idx >= sizeof (blacklist) / sizeof (blacklist[0])) { + printf("Error: too many devices to blacklist"); + break; + } + blacklist[idx] = dev->addr; + ++idx; + } + + rte_eal_pci_set_blacklist(blacklist, idx); + printf("%u devices blacklisted\n", idx); +} + +int +test_pci(void) +{ + + printf("Dump all devices\n"); + rte_eal_pci_dump(); + if (driver_registered == 0) { + rte_eal_pci_register(&my_driver); + rte_eal_pci_register(&my_driver2); + driver_registered = 1; + } + + pci_dev_count = 0; + printf("Scan bus\n"); + rte_eal_pci_probe(); + + if (pci_dev_count == 0) { + printf("no device detected\n"); + return -1; + } + + blacklist_all_devices(); + + pci_dev_count = 0; + printf("Scan bus with all devices blacklisted\n"); + rte_eal_pci_probe(); + + blacklist_clear(); + + if (pci_dev_count != 0) { + printf("not all devices are blacklisted\n"); + return -1; + } + + test_pci_run = 1; + return 0; +} diff --git a/app/test/test_per_lcore.c b/app/test/test_per_lcore.c new file mode 100644 index 0000000000..001ae03018 --- /dev/null +++ b/app/test/test_per_lcore.c @@ -0,0 +1,142 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
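The test file above condenses to a simple registration pattern: fill in a struct rte_pci_driver with an id table and a devinit() callback, register it, and let rte_eal_pci_probe() call devinit() once for every matching device. A minimal sketch, assuming the same rte_pci.h API used above (the demo_* names are illustrative; 0x8086/0x100E is the qemu e1000 id already used by the test):

#include <stdio.h>
#include <rte_pci.h>

/* called by rte_eal_pci_probe() once per device matching the id table */
static int
demo_devinit(struct rte_pci_driver *dr, struct rte_pci_device *dev)
{
    printf("%s claimed %x:%x:%x.%d (vendor %x device %x)\n",
            dr->name, dev->addr.domain, dev->addr.bus,
            dev->addr.devid, dev->addr.function,
            dev->id.vendor_id, dev->id.device_id);
    return 0;
}

static struct rte_pci_id demo_ids[] = {
    { RTE_PCI_DEVICE(0x8086, 0x100E) },   /* qemu e1000 */
    { .vendor_id = 0 /* sentinel */ },
};

static struct rte_pci_driver demo_driver = {
    .name = "demo_driver",
    .devinit = demo_devinit,
    .id_table = demo_ids,
    .drv_flags = 0,            /* no igb_uio requirement for this sketch */
};

static int
demo_register_and_probe(void)
{
    rte_eal_pci_register(&demo_driver);
    return rte_eal_pci_probe();   /* scans the bus and calls demo_devinit() */
}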
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" + +/* + * Per-lcore variables and lcore launch + * ==================================== + * + * - Use ``rte_eal_mp_remote_launch()`` to call ``assign_vars()`` on + * every available lcore. In this function, a per-lcore variable is + * assigned to the lcore_id. + * + * - Use ``rte_eal_mp_remote_launch()`` to call ``display_vars()`` on + * every available lcore. The function checks that the variable is + * correctly set, or returns -1. + * + * - If at least one per-core variable was not correct, the test function + * returns -1. 
+ */ + +static RTE_DEFINE_PER_LCORE(unsigned, test) = 0x12345678; + +static int +assign_vars(__attribute__((unused)) void *arg) +{ + if (RTE_PER_LCORE(test) != 0x12345678) + return -1; + RTE_PER_LCORE(test) = rte_lcore_id(); + return 0; +} + +static int +display_vars(__attribute__((unused)) void *arg) +{ + unsigned lcore_id = rte_lcore_id(); + unsigned var = RTE_PER_LCORE(test); + unsigned socket_id = rte_lcore_to_socket_id(lcore_id); + + printf("on socket %u, on core %u, variable is %u\n", socket_id, lcore_id, var); + if (lcore_id != var) + return -1; + + RTE_PER_LCORE(test) = 0x12345678; + return 0; +} + +static int +test_per_lcore_delay(__attribute__((unused)) void *arg) +{ + rte_delay_ms(5000); + printf("wait 5000ms on lcore %u\n", rte_lcore_id()); + + return 0; +} + +int +test_per_lcore(void) +{ + unsigned lcore_id; + int ret; + + rte_eal_mp_remote_launch(assign_vars, NULL, SKIP_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + rte_eal_mp_remote_launch(display_vars, NULL, SKIP_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + /* test if it could do remote launch twice at the same time or not */ + ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER); + if (ret < 0) { + printf("It fails to do remote launch but it should able to do\n"); + return -1; + } + /* it should not be able to launch a lcore which is running */ + ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER); + if (ret == 0) { + printf("It does remote launch successfully but it should not at this time\n"); + return -1; + } + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/app/test/test_prefetch.c b/app/test/test_prefetch.c new file mode 100644 index 0000000000..8a9b439d2f --- /dev/null +++ b/app/test/test_prefetch.c @@ -0,0 +1,63 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include + +#include + +#include + +#include "test.h" + +/* + * Prefetch test + * ============= + * + * - Just test that the macro can be called and validate the compilation. + * The test always return success. + */ + +int +test_prefetch(void) +{ + int a; + + rte_prefetch0(&a); + rte_prefetch1(&a); + rte_prefetch2(&a); + + return 0; +} diff --git a/app/test/test_ring.c b/app/test/test_ring.c new file mode 100644 index 0000000000..d6bb44b9f9 --- /dev/null +++ b/app/test/test_ring.c @@ -0,0 +1,987 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "test.h" + +/* + * Ring + * ==== + * + * #. 
Basic tests: done on one core: + * + * - Using single producer/single consumer functions: + * + * - Enqueue one object, two objects, MAX_BULK objects + * - Dequeue one object, two objects, MAX_BULK objects + * - Check that dequeued pointers are correct + * + * - Using multi producers/multi consumers functions: + * + * - Enqueue one object, two objects, MAX_BULK objects + * - Dequeue one object, two objects, MAX_BULK objects + * - Check that dequeued pointers are correct + * + * - Test watermark and default bulk enqueue/dequeue: + * + * - Set watermark + * - Set default bulk value + * - Enqueue objects, check that -EDQUOT is returned when + * watermark is exceeded + * - Check that dequeued pointers are correct + * + * #. Check quota and watermark + * + * - Start a loop on another lcore that will enqueue and dequeue + * objects in a ring. It will monitor the value of quota (default + * bulk count) and watermark. + * - At the same time, change the quota and the watermark on the + * master lcore. + * - The slave lcore will check that bulk count changes from 4 to + * 8, and watermark changes from 16 to 32. + * + * #. Performance tests. + * + * This test is done on the following configurations: + * + * - One core enqueuing, one core dequeuing + * - One core enqueuing, other cores dequeuing + * - One core dequeuing, other cores enqueuing + * - Half of the cores enqueuing, the other half dequeuing + * + * When only one core enqueues/dequeues, the test is done with the + * SP/SC functions in addition to the MP/MC functions. + * + * The test is done with different bulk size. + * + * On each core, the test enqueues or dequeues objects during + * TIME_S seconds. The number of successes and failures are stored on + * each core, then summed and displayed. + * + * The test checks that the number of enqueues is equal to the + * number of dequeues. 
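The basic part of this plan boils down to a small number of calls: create a ring, optionally set a watermark, and check the return codes of the bulk enqueue/dequeue functions, where -EDQUOT signals that the enqueue succeeded but crossed the watermark and -ENOBUFS that the ring is full. A condensed sketch, assuming the same 1.2-era ring API (demo_* names are illustrative):

#include <errno.h>
#include <stdio.h>
#include <rte_lcore.h>
#include <rte_ring.h>

static int
demo_ring_watermark(void)
{
    struct rte_ring *ring;
    void *objs[8];
    unsigned i;
    int ret;

    for (i = 0; i < 8; i++)
        objs[i] = (void *)(unsigned long)i;

    /* ring size must be a power of two; flags 0 keeps MP/MC defaults */
    ring = rte_ring_create("demo_ring", 64, rte_socket_id(), 0);
    if (ring == NULL)
        return -1;

    /* beyond 16 used entries, enqueues still succeed but return -EDQUOT */
    rte_ring_set_water_mark(ring, 16);

    for (i = 0; i < 4; i++) {
        ret = rte_ring_sp_enqueue_bulk(ring, objs, 8);
        if (ret == -EDQUOT)
            printf("watermark exceeded after %u bulks\n", i + 1);
        else if (ret != 0)
            return -1;   /* -ENOBUFS: the ring is actually full */
    }

    /* drain everything again; 0 means the whole bulk was dequeued */
    for (i = 0; i < 4; i++)
        if (rte_ring_sc_dequeue_bulk(ring, objs, 8) != 0)
            return -1;

    return rte_ring_empty(ring) ? 0 : -1;
}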
+ */ + +#define RING_SIZE 4096 +#define MAX_BULK 32 +#define N 65536 +#define TIME_S 5 + +static rte_atomic32_t synchro; + +static unsigned bulk_enqueue; +static unsigned bulk_dequeue; +static struct rte_ring *r; + +struct test_stats { + unsigned enq_success ; + unsigned enq_quota; + unsigned enq_fail; + + unsigned deq_success; + unsigned deq_fail; +} __rte_cache_aligned; + +static struct test_stats test_stats[RTE_MAX_LCORE]; + +#define DEFINE_ENQUEUE_FUNCTION(name, enq_code) \ +static int \ +name(__attribute__((unused)) void *arg) \ +{ \ + unsigned success = 0; \ + unsigned quota = 0; \ + unsigned fail = 0; \ + unsigned i; \ + unsigned long dummy_obj; \ + void *obj_table[MAX_BULK]; \ + int ret; \ + unsigned lcore_id = rte_lcore_id(); \ + uint64_t start_cycles, end_cycles; \ + uint64_t time_diff = 0, hz = rte_get_hpet_hz(); \ + \ + /* init dummy object table */ \ + for (i = 0; i< MAX_BULK; i++) { \ + dummy_obj = lcore_id + 0x1000 + i; \ + obj_table[i] = (void *)dummy_obj; \ + } \ + \ + /* wait synchro for slaves */ \ + if (lcore_id != rte_get_master_lcore()) \ + while (rte_atomic32_read(&synchro) == 0); \ + \ + start_cycles = rte_get_hpet_cycles(); \ + \ + /* enqueue as many object as possible */ \ + while (time_diff/hz < TIME_S) { \ + for (i = 0; likely(i < N); i++) { \ + ret = enq_code; \ + if (ret == 0) \ + success++; \ + else if (ret == -EDQUOT) \ + quota++; \ + else \ + fail++; \ + } \ + end_cycles = rte_get_hpet_cycles(); \ + time_diff = end_cycles - start_cycles; \ + } \ + \ + /* write statistics in a shared structure */ \ + test_stats[lcore_id].enq_success = success; \ + test_stats[lcore_id].enq_quota = quota; \ + test_stats[lcore_id].enq_fail = fail; \ + \ + return 0; \ +} + +#define DEFINE_DEQUEUE_FUNCTION(name, deq_code) \ +static int \ +name(__attribute__((unused)) void *arg) \ +{ \ + unsigned success = 0; \ + unsigned fail = 0; \ + unsigned i; \ + void *obj_table[MAX_BULK]; \ + int ret; \ + unsigned lcore_id = rte_lcore_id(); \ + uint64_t start_cycles, end_cycles; \ + uint64_t time_diff = 0, hz = rte_get_hpet_hz(); \ + \ + /* wait synchro for slaves */ \ + if (lcore_id != rte_get_master_lcore()) \ + while (rte_atomic32_read(&synchro) == 0); \ + \ + start_cycles = rte_get_hpet_cycles(); \ + \ + /* dequeue as many object as possible */ \ + while (time_diff/hz < TIME_S) { \ + for (i = 0; likely(i < N); i++) { \ + ret = deq_code; \ + if (ret == 0) \ + success++; \ + else \ + fail++; \ + } \ + end_cycles = rte_get_hpet_cycles(); \ + time_diff = end_cycles - start_cycles; \ + } \ + \ + /* write statistics in a shared structure */ \ + test_stats[lcore_id].deq_success = success; \ + test_stats[lcore_id].deq_fail = fail; \ + \ + return 0; \ +} + +DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_sp_enqueue, + rte_ring_sp_enqueue_bulk(r, obj_table, bulk_enqueue)) + +DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_sc_dequeue, + rte_ring_sc_dequeue_bulk(r, obj_table, bulk_dequeue)) + +DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_mp_enqueue, + rte_ring_mp_enqueue_bulk(r, obj_table, bulk_enqueue)) + +DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_mc_dequeue, + rte_ring_mc_dequeue_bulk(r, obj_table, bulk_dequeue)) + +#define TEST_RING_VERIFY(exp) \ + if (!(exp)) { \ + printf("error at %s:%d\tcondition " #exp " failed\n", \ + __func__, __LINE__); \ + rte_ring_dump(r); \ + return (-1); \ + } + +#define TEST_RING_FULL_EMTPY_ITER 8 + + +static int +launch_cores(unsigned enq_core_count, unsigned deq_core_count, int sp, int sc) +{ + void *obj; + unsigned lcore_id; + unsigned rate, deq_remain = 0; + unsigned 
enq_total, deq_total; + struct test_stats sum; + int (*enq_f)(void *); + int (*deq_f)(void *); + unsigned cores = enq_core_count + deq_core_count; + int ret; + + rte_atomic32_set(&synchro, 0); + + printf("ring_autotest e/d_core=%u,%u e/d_bulk=%u,%u ", + enq_core_count, deq_core_count, bulk_enqueue, bulk_dequeue); + printf("sp=%d sc=%d ", sp, sc); + + /* set enqueue function to be used */ + if (sp) + enq_f = test_ring_per_core_sp_enqueue; + else + enq_f = test_ring_per_core_mp_enqueue; + + /* set dequeue function to be used */ + if (sc) + deq_f = test_ring_per_core_sc_dequeue; + else + deq_f = test_ring_per_core_mc_dequeue; + + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (enq_core_count != 0) { + enq_core_count--; + rte_eal_remote_launch(enq_f, NULL, lcore_id); + } + if (deq_core_count != 1) { + deq_core_count--; + rte_eal_remote_launch(deq_f, NULL, lcore_id); + } + } + + memset(test_stats, 0, sizeof(test_stats)); + + /* start synchro and launch test on master */ + rte_atomic32_set(&synchro, 1); + ret = deq_f(NULL); + + /* wait all cores */ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (cores == 1) + break; + cores--; + if (rte_eal_wait_lcore(lcore_id) < 0) + ret = -1; + } + + memset(&sum, 0, sizeof(sum)); + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + sum.enq_success += test_stats[lcore_id].enq_success; + sum.enq_quota += test_stats[lcore_id].enq_quota; + sum.enq_fail += test_stats[lcore_id].enq_fail; + sum.deq_success += test_stats[lcore_id].deq_success; + sum.deq_fail += test_stats[lcore_id].deq_fail; + } + + /* empty the ring */ + while (rte_ring_sc_dequeue(r, &obj) == 0) + deq_remain += 1; + + if (ret < 0) { + printf("per-lcore test returned -1\n"); + return -1; + } + + enq_total = (sum.enq_success * bulk_enqueue) + + (sum.enq_quota * bulk_enqueue); + deq_total = (sum.deq_success * bulk_dequeue) + deq_remain; + + rate = deq_total/TIME_S; + + printf("rate_persec=%u\n", rate); + + if (enq_total != deq_total) { + printf("invalid enq/deq_success counter: %u %u\n", + enq_total, deq_total); + return -1; + } + + return 0; +} + +static int +do_one_ring_test2(unsigned enq_core_count, unsigned deq_core_count, + unsigned n_enq_bulk, unsigned n_deq_bulk) +{ + int sp, sc; + int do_sp, do_sc; + int ret; + + bulk_enqueue = n_enq_bulk; + bulk_dequeue = n_deq_bulk; + + do_sp = (enq_core_count == 1) ? 1 : 0; + do_sc = (deq_core_count == 1) ? 
1 : 0; + + for (sp = 0; sp <= do_sp; sp ++) { + for (sc = 0; sc <= do_sc; sc ++) { + ret = launch_cores(enq_core_count, + deq_core_count, + sp, sc); + if (ret < 0) + return -1; + } + } + return 0; +} + +static int +do_one_ring_test(unsigned enq_core_count, unsigned deq_core_count) +{ + unsigned bulk_enqueue_tab[] = { 1, 2, 4, 32, 0 }; + unsigned bulk_dequeue_tab[] = { 1, 2, 4, 32, 0 }; + unsigned *bulk_enqueue_ptr; + unsigned *bulk_dequeue_ptr; + int ret; + + for (bulk_enqueue_ptr = bulk_enqueue_tab; + *bulk_enqueue_ptr; + bulk_enqueue_ptr++) { + + for (bulk_dequeue_ptr = bulk_dequeue_tab; + *bulk_dequeue_ptr; + bulk_dequeue_ptr++) { + + ret = do_one_ring_test2(enq_core_count, deq_core_count, + *bulk_enqueue_ptr, + *bulk_dequeue_ptr); + if (ret < 0) + return -1; + } + } + return 0; +} + +static int +check_quota_and_watermark(__attribute__((unused)) void *dummy) +{ + uint64_t hz = rte_get_hpet_hz(); + void *obj_table[MAX_BULK]; + unsigned watermark, watermark_old = 16; + uint64_t cur_time, end_time; + int64_t diff = 0; + int i, ret; + unsigned quota, quota_old = 4; + + /* init the object table */ + memset(obj_table, 0, sizeof(obj_table)); + end_time = rte_get_hpet_cycles() + (hz * 2); + + /* check that bulk and watermark are 4 and 32 (respectively) */ + while (diff >= 0) { + + /* read quota, the only change allowed is from 4 to 8 */ + quota = rte_ring_get_bulk_count(r); + if (quota != quota_old && (quota_old != 4 || quota != 8)) { + printf("Bad quota change %u -> %u\n", quota_old, + quota); + return -1; + } + quota_old = quota; + + /* add in ring until we reach watermark */ + ret = 0; + for (i = 0; i < 16; i ++) { + if (ret != 0) + break; + ret = rte_ring_enqueue_bulk(r, obj_table, quota); + } + + if (ret != -EDQUOT) { + printf("Cannot enqueue objects, or watermark not " + "reached (ret=%d)\n", ret); + return -1; + } + + /* read watermark, the only change allowed is from 16 to 32 */ + watermark = i * quota; + if (watermark != watermark_old && + (watermark_old != 16 || watermark != 32)) { + printf("Bad watermark change %u -> %u\n", watermark_old, + watermark); + return -1; + } + watermark_old = watermark; + + /* dequeue objects from ring */ + while (i--) { + ret = rte_ring_dequeue_bulk(r, obj_table, quota); + if (ret != 0) { + printf("Cannot dequeue (ret=%d)\n", ret); + return -1; + } + } + + cur_time = rte_get_hpet_cycles(); + diff = end_time - cur_time; + } + + if (watermark_old != 32 || quota_old != 8) { + printf("quota or watermark was not updated (q=%u wm=%u)\n", + quota_old, watermark_old); + return -1; + } + + return 0; +} + +static int +test_quota_and_watermark(void) +{ + unsigned lcore_id = rte_lcore_id(); + unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1); + + printf("Test quota and watermark live modification\n"); + + rte_ring_set_bulk_count(r, 4); + rte_ring_set_water_mark(r, 16); + + /* launch a thread that will enqueue and dequeue, checking + * watermark and quota */ + rte_eal_remote_launch(check_quota_and_watermark, NULL, lcore_id2); + + rte_delay_ms(1000); + rte_ring_set_bulk_count(r, 8); + rte_ring_set_water_mark(r, 32); + rte_delay_ms(1000); + + if (rte_eal_wait_lcore(lcore_id2) < 0) + return -1; + + return 0; +} +/* Test for catch on invalid watermark values */ +static int +test_set_watermark( void ){ + unsigned count; + int setwm; + + struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex"); + if(r == NULL){ + printf( " ring lookup failed\n" ); + goto error; + } + count = r->prod.size*2; + setwm = rte_ring_set_water_mark(r, count); + if (setwm != -EINVAL){ + 
printf("Test failed to detect invalid watermark count value\n"); + goto error; + } + + count = 0; + setwm = rte_ring_set_water_mark(r, count); + if (r->prod.watermark != r->prod.size) { + printf("Test failed to detect invalid watermark count value\n"); + goto error; + } + return 0; + +error: + return -1; +} + +/* + * helper routine for test_ring_basic + */ +static int +test_ring_basic_full_empty(void * const src[], void *dst[]) +{ + unsigned i, rand; + const unsigned rsz = RING_SIZE - 1; + + printf("Basic full/empty test\n"); + + for (i = 0; TEST_RING_FULL_EMTPY_ITER != i; i++) { + + /* random shift in the ring */ + rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL); + printf("%s: iteration %u, random shift: %u;\n", + __func__, i, rand); + TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src, + rand)); + TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand)); + + /* fill the ring */ + TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src, + rsz)); + TEST_RING_VERIFY(0 == rte_ring_free_count(r)); + TEST_RING_VERIFY(rsz == rte_ring_count(r)); + TEST_RING_VERIFY(rte_ring_full(r)); + TEST_RING_VERIFY(0 == rte_ring_empty(r)); + + /* empty the ring */ + TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz)); + TEST_RING_VERIFY(rsz == rte_ring_free_count(r)); + TEST_RING_VERIFY(0 == rte_ring_count(r)); + TEST_RING_VERIFY(0 == rte_ring_full(r)); + TEST_RING_VERIFY(rte_ring_empty(r)); + + /* check data */ + TEST_RING_VERIFY(0 == memcmp(src, dst, rsz)); + rte_ring_dump(r); + } + return (0); +} + +static int +test_ring_basic(void) +{ + void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL; + int ret; + unsigned i, n; + + /* alloc dummy object pointers */ + src = malloc(RING_SIZE*2*sizeof(void *)); + if (src == NULL) + goto fail; + + for (i = 0; i < RING_SIZE*2 ; i++) { + src[i] = (void *)(unsigned long)i; + } + cur_src = src; + + /* alloc some room for copied objects */ + dst = malloc(RING_SIZE*2*sizeof(void *)); + if (dst == NULL) + goto fail; + + memset(dst, 0, RING_SIZE*2*sizeof(void *)); + cur_dst = dst; + + printf("enqueue 1 obj\n"); + ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1); + cur_src += 1; + if (ret != 0) + goto fail; + + printf("enqueue 2 objs\n"); + ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2); + cur_src += 2; + if (ret != 0) + goto fail; + + printf("enqueue MAX_BULK objs\n"); + ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK); + cur_src += MAX_BULK; + if (ret != 0) + goto fail; + + printf("dequeue 1 obj\n"); + ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1); + cur_dst += 1; + if (ret != 0) + goto fail; + + printf("dequeue 2 objs\n"); + ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2); + cur_dst += 2; + if (ret != 0) + goto fail; + + printf("dequeue MAX_BULK objs\n"); + ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK); + cur_dst += MAX_BULK; + if (ret != 0) + goto fail; + + /* check data */ + if (memcmp(src, dst, cur_dst - dst)) { + test_hexdump("src", src, cur_src - src); + test_hexdump("dst", dst, cur_dst - dst); + printf("data after dequeue is not the same\n"); + goto fail; + } + cur_src = src; + cur_dst = dst; + + printf("enqueue 1 obj\n"); + ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1); + cur_src += 1; + if (ret != 0) + goto fail; + + printf("enqueue 2 objs\n"); + ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2); + cur_src += 2; + if (ret != 0) + goto fail; + + printf("enqueue MAX_BULK objs\n"); + ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK); + cur_src += MAX_BULK; + if (ret != 0) + goto fail; + + printf("dequeue 1 obj\n"); + ret = 
rte_ring_mc_dequeue_bulk(r, cur_dst, 1); + cur_dst += 1; + if (ret != 0) + goto fail; + + printf("dequeue 2 objs\n"); + ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2); + cur_dst += 2; + if (ret != 0) + goto fail; + + printf("dequeue MAX_BULK objs\n"); + ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK); + cur_dst += MAX_BULK; + if (ret != 0) + goto fail; + + /* check data */ + if (memcmp(src, dst, cur_dst - dst)) { + test_hexdump("src", src, cur_src - src); + test_hexdump("dst", dst, cur_dst - dst); + printf("data after dequeue is not the same\n"); + goto fail; + } + cur_src = src; + cur_dst = dst; + + printf("fill and empty the ring\n"); + for (i = 0; i +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" + +/* + * rwlock test + * =========== + * + * - There is a global rwlock and a table of rwlocks (one per lcore). + * + * - The test function takes all of these locks and launches the + * ``test_rwlock_per_core()`` function on each core (except the master). + * + * - The function takes the global write lock, display something, + * then releases the global lock. + * - Then, it takes the per-lcore write lock, display something, and + * releases the per-core lock. + * - Finally, a read lock is taken during 100 ms, then released. + * + * - The main function unlocks the per-lcore locks sequentially and + * waits between each lock. This triggers the display of a message + * for each core, in the correct order. + * + * Then, it tries to take the global write lock and display the last + * message. The autotest script checks that the message order is correct. + */ + +static rte_rwlock_t sl; +static rte_rwlock_t sl_tab[RTE_MAX_LCORE]; + +static int +test_rwlock_per_core(__attribute__((unused)) void *arg) +{ + rte_rwlock_write_lock(&sl); + printf("Global write lock taken on core %u\n", rte_lcore_id()); + rte_rwlock_write_unlock(&sl); + + rte_rwlock_write_lock(&sl_tab[rte_lcore_id()]); + printf("Hello from core %u !\n", rte_lcore_id()); + rte_rwlock_write_unlock(&sl_tab[rte_lcore_id()]); + + rte_rwlock_read_lock(&sl); + printf("Global read lock taken on core %u\n", rte_lcore_id()); + rte_delay_ms(100); + printf("Release global read lock on core %u\n", rte_lcore_id()); + rte_rwlock_read_unlock(&sl); + + return 0; +} + +int +test_rwlock(void) +{ + int i; + + rte_rwlock_init(&sl); + for (i=0; i +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" + +/* + * Spinlock test + * ============= + * + * - There is a global spinlock and a table of spinlocks (one per lcore). + * + * - The test function takes all of these locks and launches the + * ``test_spinlock_per_core()`` function on each core (except the master). + * + * - The function takes the global lock, display something, then releases + * the global lock. + * - The function takes the per-lcore lock, display something, then releases + * the per-core lock. + * + * - The main function unlocks the per-lcore locks sequentially and + * waits between each lock. This triggers the display of a message + * for each core, in the correct order. The autotest script checks that + * this order is correct. 
+ * + * - A load test is carried out, with all cores attempting to lock a single lock + * multiple times + */ + +static rte_spinlock_t sl, sl_try; +static rte_spinlock_t sl_tab[RTE_MAX_LCORE]; +static rte_spinlock_recursive_t slr; +static unsigned count; + +static int +test_spinlock_per_core(__attribute__((unused)) void *arg) +{ + rte_spinlock_lock(&sl); + printf("Global lock taken on core %u\n", rte_lcore_id()); + rte_spinlock_unlock(&sl); + + rte_spinlock_lock(&sl_tab[rte_lcore_id()]); + printf("Hello from core %u !\n", rte_lcore_id()); + rte_spinlock_unlock(&sl_tab[rte_lcore_id()]); + + return 0; +} + +static int +test_spinlock_recursive_per_core(__attribute__((unused)) void *arg) +{ + unsigned id = rte_lcore_id(); + + rte_spinlock_recursive_lock(&slr); + printf("Global recursive lock taken on core %u - count = %d\n", + id, slr.count); + rte_spinlock_recursive_lock(&slr); + printf("Global recursive lock taken on core %u - count = %d\n", + id, slr.count); + rte_spinlock_recursive_lock(&slr); + printf("Global recursive lock taken on core %u - count = %d\n", + id, slr.count); + + printf("Hello from within recursive locks from core %u !\n", id); + + rte_spinlock_recursive_unlock(&slr); + printf("Global recursive lock released on core %u - count = %d\n", + id, slr.count); + rte_spinlock_recursive_unlock(&slr); + printf("Global recursive lock released on core %u - count = %d\n", + id, slr.count); + rte_spinlock_recursive_unlock(&slr); + printf("Global recursive lock released on core %u - count = %d\n", + id, slr.count); + + return 0; +} + +static volatile int count1, count2; +static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER; +static unsigned int max = 10000000; /* 10M */ +static volatile uint64_t looptime[RTE_MAX_LCORE]; + +static int +load_loop_fn(__attribute__((unused)) void *dummy) +{ + uint64_t end, begin; + begin = rte_get_hpet_cycles(); + unsigned int i = 0; + for ( i = 0; i < max; i++) { + rte_spinlock_lock(&lk); + count1++; + rte_spinlock_unlock(&lk); + count2++; + } + end = rte_get_hpet_cycles(); + looptime[rte_lcore_id()] = end - begin; + return 0; +} + +static int +test_spinlock_load(void) +{ + if (rte_lcore_count()<= 1) { + printf("no cores counted\n"); + return -1; + } + printf ("Running %u tests.......\n", max); + printf ("Number of cores = %u\n", rte_lcore_count()); + + rte_eal_mp_remote_launch(load_loop_fn, NULL , CALL_MASTER); + rte_eal_mp_wait_lcore(); + + unsigned int k = 0; + uint64_t avgtime = 0; + + RTE_LCORE_FOREACH(k) { + printf("Core [%u] time = %"PRIu64"\n", k, looptime[k]); + avgtime += looptime[k]; + } + + avgtime = avgtime / rte_lcore_count(); + printf("Average time = %"PRIu64"\n", avgtime); + + int check = 0; + check = max * rte_lcore_count(); + if (count1 == check && count2 != check) + printf("Passed Load test\n"); + else { + printf("Failed load test\n"); + return -1; + } + return 0; +} + +/* + * Use rte_spinlock_trylock() to trylock a spinlock object, + * If it could not lock the object sucessfully, it would + * return immediately and the variable of "count" would be + * increased by one per times. the value of "count" could be + * checked as the result later. + */ +static int +test_spinlock_try(__attribute__((unused)) void *arg) +{ + if (rte_spinlock_trylock(&sl_try) == 0) { + rte_spinlock_lock(&sl); + count ++; + rte_spinlock_unlock(&sl); + } + + return 0; +} + + +/* + * Test rte_eal_get_lcore_state() in addition to spinlocks + * as we have "waiting" then "running" lcores. 
+ */ +int +test_spinlock(void) +{ + int ret = 0; + int i; + + /* slave cores should be waiting: print it */ + RTE_LCORE_FOREACH_SLAVE(i) { + printf("lcore %d state: %d\n", i, + (int) rte_eal_get_lcore_state(i)); + } + + rte_spinlock_init(&sl); + rte_spinlock_init(&sl_try); + rte_spinlock_recursive_init(&slr); + for (i=0; i +#include +#include +#include +#include + +#include + +#include + +#include "test.h" + +#define LOG(...) do {\ + fprintf(stderr, "%s() ln %d: ", __func__, __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ +} while(0) + +#define DATA_BYTE 'a' + +static int +test_rte_snprintf(void) +{ + /* ================================================= + * First test with a string that will fit in buffer + * =================================================*/ + do { + int retval; + const char source[] = "This is a string that will fit in buffer"; + char buf[sizeof(source)+2]; /* make buffer big enough to fit string */ + + /* initialise buffer with characters so it can contain no nulls */ + memset(buf, DATA_BYTE, sizeof(buf)); + + /* run rte_snprintf and check results */ + retval = rte_snprintf(buf, sizeof(buf), "%s", source); + if (retval != sizeof(source) - 1) { + LOG("Error, retval = %d, expected = %u\n", + retval, (unsigned)sizeof(source)); + return -1; + } + if (buf[retval] != '\0') { + LOG("Error, resultant is not null-terminated\n"); + return -1; + } + if (memcmp(source, buf, sizeof(source)-1) != 0){ + LOG("Error, corrupt data in buffer\n"); + return -1; + } + } while (0); + + do { + /* ================================================= + * Test with a string that will get truncated + * =================================================*/ + int retval; + const char source[] = "This is a long string that won't fit in buffer"; + char buf[sizeof(source)/2]; /* make buffer half the size */ + + /* initialise buffer with characters so it can contain no nulls */ + memset(buf, DATA_BYTE, sizeof(buf)); + + /* run rte_snprintf and check results */ + retval = rte_snprintf(buf, sizeof(buf), "%s", source); + if (retval != sizeof(source) - 1) { + LOG("Error, retval = %d, expected = %u\n", + retval, (unsigned)sizeof(source)); + return -1; + } + if (buf[sizeof(buf)-1] != '\0') { + LOG("Error, buffer is not null-terminated\n"); + return -1; + } + if (memcmp(source, buf, sizeof(buf)-1) != 0){ + LOG("Error, corrupt data in buffer\n"); + return -1; + } + } while (0); + + do { + /* =========================================================== + * Test using zero-size buf to check how long a buffer we need + * ===========================================================*/ + int retval; + const char source[] = "This is a string"; + char buf[10]; + + /* call with a zero-sized non-NULL buffer, should tell how big a buffer + * we need */ + retval = rte_snprintf(buf, 0, "%s", source); + if (retval != sizeof(source) - 1) { + LOG("Call with 0-length buffer does not return correct size." + "Expected: %zu, got: %d\n", sizeof(source), retval); + return -1; + } + + /* call with a zero-sized NULL buffer, should tell how big a buffer + * we need */ + retval = rte_snprintf(NULL, 0, "%s", source); + if (retval != sizeof(source) - 1) { + LOG("Call with 0-length buffer does not return correct size." 
+ "Expected: %zu, got: %d\n", sizeof(source), retval); + return -1; + } + + } while (0); + + do { + /* ================================================= + * Test with invalid parameter values + * =================================================*/ + const char source[] = "This is a string"; + char buf[10]; + + /* call with buffer value set to NULL is EINVAL */ + if (rte_snprintf(NULL, sizeof(buf), "%s\n", source) != -1 || + errno != EINVAL) { + LOG("Failed to get suitable error when passing NULL buffer\n"); + return -1; + } + + memset(buf, DATA_BYTE, sizeof(buf)); + /* call with a NULL format and zero-size should return error + * without affecting the buffer */ + if (rte_snprintf(buf, 0, NULL) != -1 || + errno != EINVAL) { + LOG("Failed to get suitable error when passing NULL buffer\n"); + return -1; + } + if (buf[0] != DATA_BYTE) { + LOG("Error, zero-length buffer modified after call with NULL" + " format string\n"); + return -1; + } + + /* call with a NULL format should return error but also null-terminate + * the buffer */ + if (rte_snprintf(buf, sizeof(buf), NULL) != -1 || + errno != EINVAL) { + LOG("Failed to get suitable error when passing NULL buffer\n"); + return -1; + } + if (buf[0] != '\0') { + LOG("Error, buffer not null-terminated after call with NULL" + " format string\n"); + return -1; + } + } while (0); + + LOG("%s - PASSED\n", __func__); + return 0; +} + +static int +test_rte_strsplit(void) +{ + int i; + do { + /* ======================================================= + * split a mac address correct number of splits requested + * =======================================================*/ + char test_string[] = "54:65:76:87:98:90"; + char *splits[6]; + + LOG("Source string: '%s', to split on ':'\n", test_string); + if (rte_strsplit(test_string, sizeof(test_string), + splits, 6, ':') != 6) { + LOG("Error splitting mac address\n"); + return -1; + } + for (i = 0; i < 6; i++) + LOG("Token %d = %s\n", i + 1, splits[i]); + } while (0); + + + do { + /* ======================================================= + * split on spaces smaller number of splits requested + * =======================================================*/ + char test_string[] = "54 65 76 87 98 90"; + char *splits[6]; + + LOG("Source string: '%s', to split on ' '\n", test_string); + if (rte_strsplit(test_string, sizeof(test_string), + splits, 3, ' ') != 3) { + LOG("Error splitting mac address for max 2 splits\n"); + return -1; + } + for (i = 0; i < 3; i++) + LOG("Token %d = %s\n", i + 1, splits[i]); + } while (0); + + do { + /* ======================================================= + * split on commas - more splits than commas requested + * =======================================================*/ + char test_string[] = "a,b,c,d"; + char *splits[6]; + + LOG("Source string: '%s', to split on ','\n", test_string); + if (rte_strsplit(test_string, sizeof(test_string), + splits, 6, ',') != 4) { + LOG("Error splitting %s on ','\n", test_string); + return -1; + } + for (i = 0; i < 4; i++) + LOG("Token %d = %s\n", i + 1, splits[i]); + } while(0); + + do { + /* ======================================================= + * Try splitting on non-existent character. 
+ * =======================================================*/ + char test_string[] = "a,b,c,d"; + char *splits[6]; + + LOG("Source string: '%s', to split on ' '\n", test_string); + if (rte_strsplit(test_string, sizeof(test_string), + splits, 6, ' ') != 1) { + LOG("Error splitting %s on ' '\n", test_string); + return -1; + } + LOG("String not split\n"); + } while(0); + + do { + /* ======================================================= + * Invalid / edge case parameter checks + * =======================================================*/ + char test_string[] = "a,b,c,d"; + char *splits[6]; + + if (rte_strsplit(NULL, 0, splits, 6, ',') >= 0 + || errno != EINVAL){ + LOG("Error: rte_strsplit accepted NULL string parameter\n"); + return -1; + } + + if (rte_strsplit(test_string, sizeof(test_string), NULL, 0, ',') >= 0 + || errno != EINVAL){ + LOG("Error: rte_strsplit accepted NULL array parameter\n"); + return -1; + } + + errno = 0; + if (rte_strsplit(test_string, 0, splits, 6, ',') != 0 || errno != 0) { + LOG("Error: rte_strsplit did not accept 0 length string\n"); + return -1; + } + + if (rte_strsplit(test_string, sizeof(test_string), splits, 0, ',') != 0 + || errno != 0) { + LOG("Error: rte_strsplit did not accept 0 length array\n"); + return -1; + } + + LOG("Parameter test cases passed\n"); + } while(0); + + LOG("%s - PASSED\n", __func__); + return 0; +} + +int +test_string_fns(void) +{ + if (test_rte_snprintf() < 0 || + test_rte_strsplit() < 0) + return -1; + return 0; +} diff --git a/app/test/test_tailq.c b/app/test/test_tailq.c new file mode 100644 index 0000000000..b67eabdf74 --- /dev/null +++ b/app/test/test_tailq.c @@ -0,0 +1,125 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "test.h" + +#define do_return(...) 
do { \ + printf("Error at %s, line %d: ", __func__, __LINE__); \ + printf(__VA_ARGS__); \ + return 1; \ +} while (0) + +#define DEFAULT_TAILQ "dummy_q0" + +static struct rte_dummy d_elem; + +static int +test_tailq_create(void) +{ + struct rte_dummy_head *d_head; + char name[RTE_TAILQ_NAMESIZE]; + unsigned i; + + /* create a first tailq and check its non-null */ + d_head = RTE_TAILQ_RESERVE(DEFAULT_TAILQ, rte_dummy_head); + if (d_head == NULL) + do_return("Error allocating "DEFAULT_TAILQ"\n"); + + /* check we can add an item to it + */ + TAILQ_INSERT_TAIL(d_head, &d_elem, next); + + /* try allocating dummy_q0 again, and check for failure */ + if (RTE_TAILQ_RESERVE(DEFAULT_TAILQ, rte_dummy_head) != NULL) + do_return("Error, non-null result returned when attemption to " + "re-allocate a tailq\n"); + + /* now fill up the tailq slots available and check we get an error */ + for (i = 1; i < RTE_MAX_TAILQ; i++){ + rte_snprintf(name, sizeof(name), "dummy_q%u", i); + if ((d_head = RTE_TAILQ_RESERVE(name, rte_dummy_head)) == NULL) + break; + } + + /* check that we had an error return before RTE_MAX_TAILQ */ + if (i == RTE_MAX_TAILQ) + do_return("Error, we did not have a reservation failure as expected\n"); + + return 0; +} + +static int +test_tailq_lookup(void) +{ + /* run successful test - check result is found */ + struct rte_dummy_head *d_head; + struct rte_dummy *d_ptr; + + d_head = RTE_TAILQ_LOOKUP(DEFAULT_TAILQ, rte_dummy_head); + if (d_head == NULL) + do_return("Error with tailq lookup\n"); + + TAILQ_FOREACH(d_ptr, d_head, next) + if (d_ptr != &d_elem) + do_return("Error with tailq returned from lookup - " + "expected element not found\n"); + + /* now try a bad/error lookup */ + d_head = RTE_TAILQ_LOOKUP("does_not_exist_queue", rte_dummy_head); + if (d_head != NULL) + do_return("Error, lookup does not return NULL for bad tailq name\n"); + + return 0; +} + +int +test_tailq(void) +{ + int ret = 0; + ret |= test_tailq_create(); + ret |= test_tailq_lookup(); + return ret; +} diff --git a/app/test/test_timer.c b/app/test/test_timer.c new file mode 100644 index 0000000000..b3aea8ce40 --- /dev/null +++ b/app/test/test_timer.c @@ -0,0 +1,363 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Timer + * ===== + * + * #. Stress tests. + * + * The objective of the timer stress tests is to check that there are no + * race conditions in list and status management. This test launches, + * resets and stops the timer very often on many cores at the same + * time. + * + * - Only one timer is used for this test. + * - On each core, the rte_timer_manage() function is called from the main + * loop every 3 microseconds. + * - In the main loop, the timer may be reset (randomly, with a + * probability of 0.5 %) 100 microseconds later on a random core, or + * stopped (with a probability of 0.5 % also). + * - In callback, the timer is can be reset (randomly, with a + * probability of 0.5 %) 100 microseconds later on the same core or + * on another core (same probability), or stopped (same + * probability). + * + * + * #. Basic test. + * + * This test performs basic functional checks of the timers. The test + * uses four different timers that are loaded and stopped under + * specific conditions in specific contexts. + * + * - Four timers are used for this test. + * - On each core, the rte_timer_manage() function is called from main loop + * every 3 microseconds. + * + * The autotest python script checks that the behavior is correct: + * + * - timer0 + * + * - At initialization, timer0 is loaded by the master core, on master core + * in "single" mode (time = 1 second). + * - In the first 19 callbacks, timer0 is reloaded on the same core, + * then, it is explicitly stopped at the 20th call. + * - At t=25s, timer0 is reloaded once by timer2. + * + * - timer1 + * + * - At initialization, timer1 is loaded by the master core, on the + * master core in "single" mode (time = 2 seconds). + * - In the first 9 callbacks, timer1 is reloaded on another + * core. After the 10th callback, timer1 is not reloaded anymore. + * + * - timer2 + * + * - At initialization, timer2 is loaded by the master core, on the + * master core in "periodical" mode (time = 1 second). + * - In the callback, when t=25s, it stops timer3 and reloads timer0 + * on the current core. + * + * - timer3 + * + * - At initialization, timer3 is loaded by the master core, on + * another core in "periodical" mode (time = 1 second). + * - It is stopped at t=25s by timer2. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "test.h" + +#define TEST_DURATION_S 30 /* in seconds */ +#define NB_TIMER 4 + +#define RTE_LOGTYPE_TESTTIMER RTE_LOGTYPE_USER3 + +static volatile uint64_t end_time; + +struct mytimerinfo { + struct rte_timer tim; + unsigned id; + unsigned count; +}; + +static struct mytimerinfo mytiminfo[NB_TIMER]; + +static void timer_basic_cb(struct rte_timer *tim, void *arg); + +static void +mytimer_reset(struct mytimerinfo *timinfo, unsigned ticks, + enum rte_timer_type type, unsigned tim_lcore, + rte_timer_cb_t fct) +{ + rte_timer_reset_sync(&timinfo->tim, ticks, type, tim_lcore, + fct, timinfo); +} + +/* timer callback for stress tests */ +static void +timer_stress_cb(__attribute__((unused)) struct rte_timer *tim, + __attribute__((unused)) void *arg) +{ + long r; + unsigned lcore_id = rte_lcore_id(); + uint64_t hz = rte_get_hpet_hz(); + + if (rte_timer_pending(tim)) + return; + + r = rte_rand(); + if ((r & 0xff) == 0) { + mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id, + timer_stress_cb); + } + else if ((r & 0xff) == 1) { + mytimer_reset(&mytiminfo[0], hz, SINGLE, + rte_get_next_lcore(lcore_id, 0, 1), + timer_stress_cb); + } + else if ((r & 0xff) == 2) { + rte_timer_stop(&mytiminfo[0].tim); + } +} + +static int +timer_stress_main_loop(__attribute__((unused)) void *arg) +{ + uint64_t hz = rte_get_hpet_hz(); + unsigned lcore_id = rte_lcore_id(); + uint64_t cur_time; + int64_t diff = 0; + long r; + + while (diff >= 0) { + + /* call the timer handler on each core */ + rte_timer_manage(); + + /* simulate the processing of a packet + * (3 us = 6000 cycles at 2 Ghz) */ + rte_delay_us(3); + + /* randomly stop or reset timer */ + r = rte_rand(); + lcore_id = rte_get_next_lcore(lcore_id, 0, 1); + if ((r & 0xff) == 0) { + /* 100 us */ + mytimer_reset(&mytiminfo[0], hz/10000, SINGLE, lcore_id, + timer_stress_cb); + } + else if ((r & 0xff) == 1) { + rte_timer_stop_sync(&mytiminfo[0].tim); + } + cur_time = rte_get_hpet_cycles(); + diff = end_time - cur_time; + } + + lcore_id = rte_lcore_id(); + RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id); + + return 0; +} + +/* timer callback for basic tests */ +static void +timer_basic_cb(struct rte_timer *tim, void *arg) +{ + struct mytimerinfo *timinfo = arg; + uint64_t hz = rte_get_hpet_hz(); + unsigned lcore_id = rte_lcore_id(); + uint64_t cur_time = rte_get_hpet_cycles(); + + if (rte_timer_pending(tim)) + return; + + timinfo->count ++; + + RTE_LOG(INFO, TESTTIMER, + "%"PRIu64": callback id=%u count=%u on core %u\n", + cur_time, timinfo->id, timinfo->count, lcore_id); + + /* reload timer 0 on same core */ + if (timinfo->id == 0 && timinfo->count < 20) { + mytimer_reset(timinfo, hz, SINGLE, lcore_id, timer_basic_cb); + return; + } + + /* reload timer 1 on next core */ + if (timinfo->id == 1 && timinfo->count < 10) { + mytimer_reset(timinfo, hz*2, SINGLE, + rte_get_next_lcore(lcore_id, 0, 1), + timer_basic_cb); + return; + } + + /* Explicitelly stop timer 0. 
Once stop() called, we can even + * erase the content of the structure: it is not referenced + * anymore by any code (in case of dynamic structure, it can + * be freed) */ + if (timinfo->id == 0 && timinfo->count == 20) { + + /* stop_sync() is not needed, because we know that the + * status of timer is only modified by this core */ + rte_timer_stop(tim); + memset(tim, 0xAA, sizeof(struct rte_timer)); + return; + } + + /* stop timer3, and restart a new timer0 (it was removed 5 + * seconds ago) for a single shot */ + if (timinfo->id == 2 && timinfo->count == 25) { + rte_timer_stop_sync(&mytiminfo[3].tim); + + /* need to reinit because structure was erased with 0xAA */ + rte_timer_init(&mytiminfo[0].tim); + mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id, + timer_basic_cb); + } +} + +static int +timer_basic_main_loop(__attribute__((unused)) void *arg) +{ + uint64_t hz = rte_get_hpet_hz(); + unsigned lcore_id = rte_lcore_id(); + uint64_t cur_time; + int64_t diff = 0; + + /* launch all timers on core 0 */ + if (lcore_id == rte_get_master_lcore()) { + mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id, + timer_basic_cb); + mytimer_reset(&mytiminfo[1], hz*2, SINGLE, lcore_id, + timer_basic_cb); + mytimer_reset(&mytiminfo[2], hz, PERIODICAL, lcore_id, + timer_basic_cb); + mytimer_reset(&mytiminfo[3], hz, PERIODICAL, + rte_get_next_lcore(lcore_id, 0, 1), + timer_basic_cb); + } + + while (diff >= 0) { + + /* call the timer handler on each core */ + rte_timer_manage(); + + /* simulate the processing of a packet + * (3 us = 6000 cycles at 2 Ghz) */ + rte_delay_us(3); + + cur_time = rte_get_hpet_cycles(); + diff = end_time - cur_time; + } + RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id); + + return 0; +} + +int +test_timer(void) +{ + unsigned i; + uint64_t cur_time; + uint64_t hz; + + if (rte_lcore_count() < 2) { + printf("not enough lcores for this test\n"); + return -1; + } + + /* init timer */ + for (i=0; i +#include +#include + +#include + +#include +#include + +#include "test.h" + + +int +test_version(void) +{ + const char *version = rte_version(); + if (version == NULL) + return -1; + printf("Version string: '%s'\n", version); + if (*version == '\0' || + strncmp(version, RTE_VER_PREFIX, sizeof(RTE_VER_PREFIX)-1) != 0) + return -1; + return 0; +} diff --git a/config/defconfig_i686-default-linuxapp-gcc b/config/defconfig_i686-default-linuxapp-gcc new file mode 100644 index 0000000000..ed544346aa --- /dev/null +++ b/config/defconfig_i686-default-linuxapp-gcc @@ -0,0 +1,240 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 +# + +# +# define executive environment +# +# CONFIG_RTE_EXEC_ENV can be linuxapp, baremetal +# +CONFIG_RTE_EXEC_ENV="linuxapp" +CONFIG_RTE_EXEC_ENV_LINUXAPP=y + +# +# machine can define specific variables or action for a specific board +# RTE_MACHINE can be: +# default nothing specific +# native current machine +# atm Intel® Atom™ microarchitecture +# nhm Intel® microarchitecture code name Nehalem +# wsm Intel® microarchitecture code name Westmere +# snb Intel® microarchitecture code name Sandy Bridge +# ivb Intel® microarchitecture code name Ivy Bridge +# +# Warning: if your compiler does not support the relevant -march options, +# it will be compiled with whatever latest processor the compiler supports! +# +CONFIG_RTE_MACHINE="native" + +# +# define the architecture we compile for. +# CONFIG_RTE_ARCH can be i686, x86_64, x86_64_32 +# +CONFIG_RTE_ARCH="i686" +CONFIG_RTE_ARCH_I686=y + +# +# The compiler we use. +# Can be gcc or icc. +# +CONFIG_RTE_TOOLCHAIN="gcc" +CONFIG_RTE_TOOLCHAIN_GCC=y + +# +# Compile libc directory +# +CONFIG_RTE_LIBC=n + +# +# Compile newlib as libc from source +# +CONFIG_RTE_LIBC_NEWLIB_SRC=n + +# +# Use binary newlib +# +CONFIG_RTE_LIBC_NEWLIB_BIN=n + +# +# Use binary newlib +# +CONFIG_RTE_LIBC_NETINCS=n + +# +# Compile libgloss (newlib-stubs) +# +CONFIG_RTE_LIBGLOSS=n + +# +# Compile Environment Abstraction Layer +# +CONFIG_RTE_LIBRTE_EAL=y +CONFIG_RTE_MAX_LCORE=32 +CONFIG_RTE_MAX_NUMA_NODES=8 +CONFIG_RTE_MAX_MEMSEG=32 +CONFIG_RTE_MAX_MEMZONE=512 +CONFIG_RTE_MAX_TAILQ=32 +CONFIG_RTE_LOG_LEVEL=8 +CONFIG_RTE_LOG_HISTORY=256 +CONFIG_RTE_LIBEAL_USE_HPET=y +CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n +CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n + +# +# Compile Environment Abstraction Layer for linux +# +CONFIG_RTE_LIBRTE_EAL_LINUXAPP=y + +# +# Compile Environment Abstraction Layer for Bare metal +# +CONFIG_RTE_LIBRTE_EAL_BAREMETAL=n + +# +# Compile generic ethernet library +# +CONFIG_RTE_LIBRTE_ETHER=y +CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n +CONFIG_RTE_MAX_ETHPORTS=32 +CONFIG_RTE_LIBRTE_IEEE1588=n + +# +# Compile burst-oriented IGB PMD driver +# +CONFIG_RTE_LIBRTE_IGB_PMD=y +CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n + +# +# Compile burst-oriented IXGBE PMD driver +# +CONFIG_RTE_LIBRTE_IXGBE_PMD=y +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n + +# +# Do prefetch of packet data within PMD driver receive function +# +CONFIG_RTE_PMD_PACKET_PREFETCH=y + +# +# Compile librte_ring +# 
+CONFIG_RTE_LIBRTE_RING=y +CONFIG_RTE_LIBRTE_RING_DEBUG=n + +# +# Compile librte_mempool +# +CONFIG_RTE_LIBRTE_MEMPOOL=y +CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512 +CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n + +# +# Compile librte_mbuf +# +CONFIG_RTE_LIBRTE_MBUF=y +CONFIG_RTE_LIBRTE_MBUF_DEBUG=n +CONFIG_RTE_MBUF_SCATTER_GATHER=y +CONFIG_RTE_MBUF_REFCNT_ATOMIC=y +CONFIG_RTE_PKTMBUF_HEADROOM=128 + +# +# Compile librte_timer +# +CONFIG_RTE_LIBRTE_TIMER=y +CONFIG_RTE_LIBRTE_TIMER_DEBUG=n + +# +# Compile librte_malloc +# +CONFIG_RTE_LIBRTE_MALLOC=y +CONFIG_RTE_LIBRTE_MALLOC_DEBUG=n +CONFIG_RTE_MALLOC_MEMZONE_SIZE=11M +CONFIG_RTE_MALLOC_PER_NUMA_NODE=y + +# +# Compile librte_cmdline +# +CONFIG_RTE_LIBRTE_CMDLINE=y + +# +# Compile librte_hash +# +CONFIG_RTE_LIBRTE_HASH=y +CONFIG_RTE_LIBRTE_HASH_DEBUG=n +CONFIG_RTE_LIBRTE_HASH_USE_MEMZONE=n + +# +# Compile librte_lpm +# +CONFIG_RTE_LIBRTE_LPM=y +CONFIG_RTE_LIBRTE_LPM_DEBUG=n + +# +# Compile librte_net +# +CONFIG_RTE_LIBRTE_NET=y + +# +# Compile the test application +# +CONFIG_RTE_APP_TEST=y + +# +# Compile the "check includes" application +# +CONFIG_RTE_APP_CHKINCS=y + +# +# Compile the PMD test application +# +CONFIG_RTE_TEST_PMD=y +CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n +CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n + +# +# gcov compilation/link directives +# +CONFIG_RTE_LIBRTE_GCOV=n + +# +# warning directives +# +CONFIG_RTE_INSECURE_FUNCTION_WARNING=n diff --git a/config/defconfig_i686-default-linuxapp-icc b/config/defconfig_i686-default-linuxapp-icc new file mode 100644 index 0000000000..cb0d017721 --- /dev/null +++ b/config/defconfig_i686-default-linuxapp-icc @@ -0,0 +1,230 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 +# + +# +# define executive environment +# +# CONFIG_RTE_EXEC_ENV can be linuxapp, baremetal +# +CONFIG_RTE_EXEC_ENV="linuxapp" +CONFIG_RTE_EXEC_ENV_LINUXAPP=y + +# +# machine can define specific variables or action for a specific board +# RTE_MACHINE can be: +# default nothing specific +# native current machine +# atm Intel® Atom™ microarchitecture +# nhm Intel® microarchitecture code name Nehalem +# wsm Intel® microarchitecture code name Westmere +# snb Intel® microarchitecture code name Sandy Bridge +# ivb Intel® microarchitecture code name Ivy Bridge +# +# Warning: if your compiler does not support the relevant -march options, +# it will be compiled with whatever latest processor the compiler supports! +# +CONFIG_RTE_MACHINE="native" + +# +# define the architecture we compile for. +# CONFIG_RTE_ARCH can be i686, x86_64, x86_64_32 +# +CONFIG_RTE_ARCH="i686" +CONFIG_RTE_ARCH_I686=y + +# +# The compiler we use. +# Can be gcc or icc. +# +CONFIG_RTE_TOOLCHAIN="icc" +CONFIG_RTE_TOOLCHAIN_ICC=y + +# +# Compile libc directory +# +CONFIG_RTE_LIBC=n + +# +# Compile newlib as libc from source +# +CONFIG_RTE_LIBC_NEWLIB_SRC=n + +# +# Use binary newlib +# +CONFIG_RTE_LIBC_NEWLIB_BIN=n + +# +# Use binary newlib +# +CONFIG_RTE_LIBC_NETINCS=n + +# +# Compile libgloss (newlib-stubs) +# +CONFIG_RTE_LIBGLOSS=n + +# +# Compile Environment Abstraction Layer +# +CONFIG_RTE_LIBRTE_EAL=y +CONFIG_RTE_MAX_LCORE=32 +CONFIG_RTE_MAX_NUMA_NODES=8 +CONFIG_RTE_MAX_MEMSEG=32 +CONFIG_RTE_MAX_MEMZONE=512 +CONFIG_RTE_MAX_TAILQ=32 +CONFIG_RTE_LOG_LEVEL=8 +CONFIG_RTE_LOG_HISTORY=256 +CONFIG_RTE_LIBEAL_USE_HPET=y +CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n +CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n + +# +# Compile Environment Abstraction Layer for linux +# +CONFIG_RTE_LIBRTE_EAL_LINUXAPP=y + +# +# Compile Environment Abstraction Layer for Bare metal +# +CONFIG_RTE_LIBRTE_EAL_BAREMETAL=n + +# +# Compile generic ethernet library +# +CONFIG_RTE_LIBRTE_ETHER=y +CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n +CONFIG_RTE_MAX_ETHPORTS=32 +CONFIG_RTE_LIBRTE_IEEE1588=n + +# +# Compile burst-oriented IGB PMD driver +# +CONFIG_RTE_LIBRTE_IGB_PMD=y +CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n + +# +# Compile burst-oriented IXGBE PMD driver +# +CONFIG_RTE_LIBRTE_IXGBE_PMD=y +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n + +# +# Do prefetch of packet data within PMD driver receive function +# +CONFIG_RTE_PMD_PACKET_PREFETCH=y + +# +# Compile librte_ring +# +CONFIG_RTE_LIBRTE_RING=y +CONFIG_RTE_LIBRTE_RING_DEBUG=n + +# +# Compile librte_mempool +# +CONFIG_RTE_LIBRTE_MEMPOOL=y +CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512 +CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n + +# +# Compile librte_mbuf +# +CONFIG_RTE_LIBRTE_MBUF=y +CONFIG_RTE_LIBRTE_MBUF_DEBUG=n +CONFIG_RTE_MBUF_SCATTER_GATHER=y +CONFIG_RTE_MBUF_REFCNT_ATOMIC=y +CONFIG_RTE_PKTMBUF_HEADROOM=128 + +# +# Compile librte_timer +# +CONFIG_RTE_LIBRTE_TIMER=y +CONFIG_RTE_LIBRTE_TIMER_DEBUG=n + +# +# Compile librte_malloc +# +CONFIG_RTE_LIBRTE_MALLOC=y +CONFIG_RTE_LIBRTE_MALLOC_DEBUG=n +CONFIG_RTE_MALLOC_MEMZONE_SIZE=11M +CONFIG_RTE_MALLOC_PER_NUMA_NODE=y + +# +# Compile librte_cmdline +# +CONFIG_RTE_LIBRTE_CMDLINE=y + +# +# Compile librte_hash +# +CONFIG_RTE_LIBRTE_HASH=y +CONFIG_RTE_LIBRTE_HASH_DEBUG=n 
+CONFIG_RTE_LIBRTE_HASH_USE_MEMZONE=n + +# +# Compile librte_lpm +# +CONFIG_RTE_LIBRTE_LPM=y +CONFIG_RTE_LIBRTE_LPM_DEBUG=n + +# +# Compile librte_net +# +CONFIG_RTE_LIBRTE_NET=y + +# +# Compile the test application +# +CONFIG_RTE_APP_TEST=y + +# +# Compile the "check includes" application +# +CONFIG_RTE_APP_CHKINCS=y + +# +# Compile the PMD test application +# +CONFIG_RTE_TEST_PMD=y +CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n +CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n diff --git a/config/defconfig_x86_64-default-linuxapp-gcc b/config/defconfig_x86_64-default-linuxapp-gcc new file mode 100644 index 0000000000..35551872ee --- /dev/null +++ b/config/defconfig_x86_64-default-linuxapp-gcc @@ -0,0 +1,240 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 +# + +# +# define executive environment +# +# CONFIG_RTE_EXEC_ENV can be linuxapp, baremetal +# +CONFIG_RTE_EXEC_ENV="linuxapp" +CONFIG_RTE_EXEC_ENV_LINUXAPP=y + +# +# machine can define specific variables or action for a specific board +# RTE_MACHINE can be: +# default nothing specific +# native current machine +# atm Intel® Atom™ microarchitecture +# nhm Intel® microarchitecture code name Nehalem +# wsm Intel® microarchitecture code name Westmere +# snb Intel® microarchitecture code name Sandy Bridge +# ivb Intel® microarchitecture code name Ivy Bridge +# +# Warning: if your compiler does not support the relevant -march options, +# it will be compiled with whatever latest processor the compiler supports! +# +CONFIG_RTE_MACHINE="native" + +# +# define the architecture we compile for. +# CONFIG_RTE_ARCH can be i686, x86_64, x86_64_32 +# +CONFIG_RTE_ARCH="x86_64" +CONFIG_RTE_ARCH_X86_64=y + +# +# The compiler we use. +# Can be gcc or icc. 
+# +CONFIG_RTE_TOOLCHAIN="gcc" +CONFIG_RTE_TOOLCHAIN_GCC=y + +# +# Compile libc directory +# +CONFIG_RTE_LIBC=n + +# +# Compile newlib as libc from source +# +CONFIG_RTE_LIBC_NEWLIB_SRC=n + +# +# Use binary newlib +# +CONFIG_RTE_LIBC_NEWLIB_BIN=n + +# +# Use binary newlib +# +CONFIG_RTE_LIBC_NETINCS=n + +# +# Compile libgloss (newlib-stubs) +# +CONFIG_RTE_LIBGLOSS=n + +# +# Compile Environment Abstraction Layer +# +CONFIG_RTE_LIBRTE_EAL=y +CONFIG_RTE_MAX_LCORE=32 +CONFIG_RTE_MAX_NUMA_NODES=8 +CONFIG_RTE_MAX_MEMSEG=32 +CONFIG_RTE_MAX_MEMZONE=512 +CONFIG_RTE_MAX_TAILQ=32 +CONFIG_RTE_LOG_LEVEL=8 +CONFIG_RTE_LOG_HISTORY=256 +CONFIG_RTE_LIBEAL_USE_HPET=y +CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n +CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n + +# +# Compile Environment Abstraction Layer for linux +# +CONFIG_RTE_LIBRTE_EAL_LINUXAPP=y + +# +# Compile Environment Abstraction Layer for Bare metal +# +CONFIG_RTE_LIBRTE_EAL_BAREMETAL=n + +# +# Compile generic ethernet library +# +CONFIG_RTE_LIBRTE_ETHER=y +CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n +CONFIG_RTE_MAX_ETHPORTS=32 +CONFIG_RTE_LIBRTE_IEEE1588=n + +# +# Compile burst-oriented IGB PMD driver +# +CONFIG_RTE_LIBRTE_IGB_PMD=y +CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n + +# +# Compile burst-oriented IXGBE PMD driver +# +CONFIG_RTE_LIBRTE_IXGBE_PMD=y +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n + +# +# Do prefetch of packet data within PMD driver receive function +# +CONFIG_RTE_PMD_PACKET_PREFETCH=y + +# +# Compile librte_ring +# +CONFIG_RTE_LIBRTE_RING=y +CONFIG_RTE_LIBRTE_RING_DEBUG=n + +# +# Compile librte_mempool +# +CONFIG_RTE_LIBRTE_MEMPOOL=y +CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512 +CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n + +# +# Compile librte_mbuf +# +CONFIG_RTE_LIBRTE_MBUF=y +CONFIG_RTE_LIBRTE_MBUF_DEBUG=n +CONFIG_RTE_MBUF_SCATTER_GATHER=y +CONFIG_RTE_MBUF_REFCNT_ATOMIC=y +CONFIG_RTE_PKTMBUF_HEADROOM=128 + +# +# Compile librte_timer +# +CONFIG_RTE_LIBRTE_TIMER=y +CONFIG_RTE_LIBRTE_TIMER_DEBUG=n + +# +# Compile librte_malloc +# +CONFIG_RTE_LIBRTE_MALLOC=y +CONFIG_RTE_LIBRTE_MALLOC_DEBUG=n +CONFIG_RTE_MALLOC_MEMZONE_SIZE=11M +CONFIG_RTE_MALLOC_PER_NUMA_NODE=y + +# +# Compile librte_cmdline +# +CONFIG_RTE_LIBRTE_CMDLINE=y + +# +# Compile librte_hash +# +CONFIG_RTE_LIBRTE_HASH=y +CONFIG_RTE_LIBRTE_HASH_DEBUG=n +CONFIG_RTE_LIBRTE_HASH_USE_MEMZONE=n + +# +# Compile librte_lpm +# +CONFIG_RTE_LIBRTE_LPM=y +CONFIG_RTE_LIBRTE_LPM_DEBUG=n + +# +# Compile librte_net +# +CONFIG_RTE_LIBRTE_NET=y + +# +# Compile the test application +# +CONFIG_RTE_APP_TEST=y + +# +# Compile the "check includes" application +# +CONFIG_RTE_APP_CHKINCS=y + +# +# Compile the PMD test application +# +CONFIG_RTE_TEST_PMD=y +CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n +CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n + +# +# gcov compilation/link directives +# +CONFIG_RTE_LIBRTE_GCOV=n + +# +# warning directives +# +CONFIG_RTE_INSECURE_FUNCTION_WARNING=n diff --git a/config/defconfig_x86_64-default-linuxapp-icc b/config/defconfig_x86_64-default-linuxapp-icc new file mode 100644 index 0000000000..f527f538c1 --- /dev/null +++ b/config/defconfig_x86_64-default-linuxapp-icc @@ -0,0 +1,230 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 +# + +# +# define executive environment +# +# CONFIG_RTE_EXEC_ENV can be linuxapp, baremetal +# +CONFIG_RTE_EXEC_ENV="linuxapp" +CONFIG_RTE_EXEC_ENV_LINUXAPP=y + +# +# machine can define specific variables or action for a specific board +# RTE_MACHINE can be: +# default nothing specific +# native current machine +# atm Intel® Atom™ microarchitecture +# nhm Intel® microarchitecture code name Nehalem +# wsm Intel® microarchitecture code name Westmere +# snb Intel® microarchitecture code name Sandy Bridge +# ivb Intel® microarchitecture code name Ivy Bridge +# +# Warning: if your compiler does not support the relevant -march options, +# it will be compiled with whatever latest processor the compiler supports! +# +CONFIG_RTE_MACHINE="native" + +# +# define the architecture we compile for. +# CONFIG_RTE_ARCH can be i686, x86_64, x86_64_32 +# +CONFIG_RTE_ARCH="x86_64" +CONFIG_RTE_ARCH_X86_64=y + +# +# The compiler we use. +# Can be gcc or icc. 
+# +CONFIG_RTE_TOOLCHAIN="icc" +CONFIG_RTE_TOOLCHAIN_ICC=y + +# +# Compile libc directory +# +CONFIG_RTE_LIBC=n + +# +# Compile newlib as libc from source +# +CONFIG_RTE_LIBC_NEWLIB_SRC=n + +# +# Use binary newlib +# +CONFIG_RTE_LIBC_NEWLIB_BIN=n + +# +# Use binary newlib +# +CONFIG_RTE_LIBC_NETINCS=n + +# +# Compile libgloss (newlib-stubs) +# +CONFIG_RTE_LIBGLOSS=n + +# +# Compile Environment Abstraction Layer +# +CONFIG_RTE_LIBRTE_EAL=y +CONFIG_RTE_MAX_LCORE=32 +CONFIG_RTE_MAX_NUMA_NODES=8 +CONFIG_RTE_MAX_MEMSEG=32 +CONFIG_RTE_MAX_MEMZONE=512 +CONFIG_RTE_MAX_TAILQ=32 +CONFIG_RTE_LOG_LEVEL=8 +CONFIG_RTE_LOG_HISTORY=256 +CONFIG_RTE_LIBEAL_USE_HPET=y +CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n +CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n + +# +# Compile Environment Abstraction Layer for linux +# +CONFIG_RTE_LIBRTE_EAL_LINUXAPP=y + +# +# Compile Environment Abstraction Layer for Bare metal +# +CONFIG_RTE_LIBRTE_EAL_BAREMETAL=n + +# +# Compile generic ethernet library +# +CONFIG_RTE_LIBRTE_ETHER=y +CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n +CONFIG_RTE_MAX_ETHPORTS=32 +CONFIG_RTE_LIBRTE_IEEE1588=n + +# +# Compile burst-oriented IGB PMD driver +# +CONFIG_RTE_LIBRTE_IGB_PMD=y +CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n + +# +# Compile burst-oriented IXGBE PMD driver +# +CONFIG_RTE_LIBRTE_IXGBE_PMD=y +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n +CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n + +# +# Do prefetch of packet data within PMD driver receive function +# +CONFIG_RTE_PMD_PACKET_PREFETCH=y + +# +# Compile librte_ring +# +CONFIG_RTE_LIBRTE_RING=y +CONFIG_RTE_LIBRTE_RING_DEBUG=n + +# +# Compile librte_mempool +# +CONFIG_RTE_LIBRTE_MEMPOOL=y +CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512 +CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n + +# +# Compile librte_mbuf +# +CONFIG_RTE_LIBRTE_MBUF=y +CONFIG_RTE_LIBRTE_MBUF_DEBUG=n +CONFIG_RTE_MBUF_SCATTER_GATHER=y +CONFIG_RTE_MBUF_REFCNT_ATOMIC=y +CONFIG_RTE_PKTMBUF_HEADROOM=128 + +# +# Compile librte_timer +# +CONFIG_RTE_LIBRTE_TIMER=y +CONFIG_RTE_LIBRTE_TIMER_DEBUG=n + +# +# Compile librte_malloc +# +CONFIG_RTE_LIBRTE_MALLOC=y +CONFIG_RTE_LIBRTE_MALLOC_DEBUG=n +CONFIG_RTE_MALLOC_MEMZONE_SIZE=11M +CONFIG_RTE_MALLOC_PER_NUMA_NODE=y + +# +# Compile librte_cmdline +# +CONFIG_RTE_LIBRTE_CMDLINE=y + +# +# Compile librte_hash +# +CONFIG_RTE_LIBRTE_HASH=y +CONFIG_RTE_LIBRTE_HASH_DEBUG=n +CONFIG_RTE_LIBRTE_HASH_USE_MEMZONE=n + +# +# Compile librte_lpm +# +CONFIG_RTE_LIBRTE_LPM=y +CONFIG_RTE_LIBRTE_LPM_DEBUG=n + +# +# Compile librte_net +# +CONFIG_RTE_LIBRTE_NET=y + +# +# Compile the test application +# +CONFIG_RTE_APP_TEST=y + +# +# Compile the "check includes" application +# +CONFIG_RTE_APP_CHKINCS=y + +# +# Compile the PMD test application +# +CONFIG_RTE_TEST_PMD=y +CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n +CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n diff --git a/examples/cmdline/482246_CmdLine_Sample_App_Guide_Rev1.1.pdf b/examples/cmdline/482246_CmdLine_Sample_App_Guide_Rev1.1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..796741fa2572a8f68545f899cbdec66d4d0fda0d GIT binary patch literal 53739 zcmce-Wo#u|vNdRCEHg9HF*7qWGqYW0W@g4Rm6;i<%*@Qp%*>wZp6)mA$Gxroq)#K| z*-9NLr5%y6BG!u7Nv0qoM#o6c22ZxTH#G+j!^Fu%2q3gKvV!N~VGy&laW-{i5VJ9K zHWe{7wl^{T^OvKOGa)B{LB`b1+}VPVgPEB@j*yX&LB->{DTBO`m9n!fAD@%6qp6`S 
[... GIT binary patch data for examples/cmdline/482246_CmdLine_Sample_App_Guide_Rev1.1.pdf (53739 bytes) not reproduced here ...]
zjSPg-`^Yh!`7LfboG1BuZwwR`6G3l0d)dK3uejunDeNMtmGbTdl7rI4J7=)Y1nxNF zx)fmNU=$!4dLNij7f!mkf5*bghi1+hxGgMl2q+&*VeB>${5^|#5sx(J=gBl-5gYS^<7YXRa**XhF(m=%CXa2 zqT?XhNZc7eJaW#lglUh%3Xn9&GNu%&M@)V@h8GdbC_5m8-~(VSGe;oS(8kMG^dtp~ zhdIgEs6H)tRHd}Ig{`o_QggctmzP4RICIDJXc9bH8hkN2!YAN$S2o^7jNG%Q)(MYETd|Z0{8x*uyu>)Io~K# z2x}#wf;#*`(e+Dcf#e_4O)~tZ8SRD_?}Qod8iNgY%9$1),VB+Y?TfoyPW^g`>z zmg64{BlAY_2y7|+@h6P6b4mns5@m!nK-q!uqEy6c-3R?8ndM|Na@Y&Ai1>AVuWc|v z9%#{oyDRFPfP_8|v)1c`S-MS3G3UCXn|Oq6^Pjg5kSeBaTwY;okRXPG(bIhEqCeP} zc0N2jn9x90!`8)mO+p@+ais%^f8SaP?4xcLAjH37EZ#0;6w1_rY6Jz$3?F7GOG+K3>nKo-JliZM&jgkOnI?$Q z#WV8eBbV&p6!fv{QwnDD`6V43Ipj>_lwjew^2^fpq=6?(KA7eA2y5jg7KjooPbC{F zrpm>Q0+JqB7?Y%!8urn6WLyhzyQ^cFrm)7QIAX0Jp^&lxMyl8 zK!p{RrdzQ|BFaUkdEfKk#p#(%C)h{wym}qGz>InC&+Mfi;7AGR?NAU)fFaynl6=ly zPQBn$@8zW)-HbGHA-H}2yD|<$lU`E*WaqoG%d;~W6|MWt?#im2YjcjlSeQcZ-S_-K&#&V;k_ZKi^ay5h628;A0>ZI0-LUY6=gMcnd! z+v_3wC~?mg`=+HeT4zi3cBr-nliMnsKd~wyWGjonguv37obYxPzT9-Jsb~b2Y4CF6 z##%n+7@O70$r`&CMEK*hXHch5vN`c-NZ4^-a*G5Rx=8Fx*kU#Zt#(JT?mQ{9w5HNI zoZ02TpUUYY;Wl_<%$vz|p-b3o1ea^tA-mDi4LuPs(K^Aq&8cjaOd#mB!B5}5T8hMZ zo9)%>7F9nUT-%EuFOnceI%1oF7F}1HbKP;5RB?fkYI_kr1o+(^nUA8fHIQv}JL!Y~ zdNDgjA=AG`yVtUt4d)xsDN=msqAL%_Xm6sc`yQoW;9*_uJ$7J!FI&oht&3fs>#wny ziehY61oq1DmfZEi><`tIfWtB>W5prsYE`TlO3aJFAJ6b2&I&D+*WT5ii!N-xU)RDx zO??YkkM~l-RdR&`aN5i4$BYIq=JaBM1NkVg+!K6~Cc-OdN5c)QPwGhyml9dF`nH2;4#nhtJZdb~+=F42F z=>{d>$h$-VFke3l$w`X9anKDVfRmX2^x6O2kyi~L9QMHPuyiYr-3>1p(iuWB!5dA4 zEf}f@+%i+zasBjp?dw+-xz6zpk1t>9!JHAiZ|?55l`ph6kIm%q_=2p;+M+G-l44@w zbYYOkEmg-?UaZ`Ab|c?i8dSeErf_`L&9Gb*c&pu2Sb$ERmtjH#$}@TE3~v0@6~av5 z2k}G_;srSl^VCyw4vq?C?MKTm!M@xdfNn=giziPz|LpeMb&_{`TNTlRAdnVu@0M(! z=iyx>8;B6@#*BS#X}4)%=(AbH=K#|+Xxz?IU)^`+nQ-MbBs~<$72~esfp$*l;gUFZ z>8z=`Us@fcZu^)6Kzj(6>j!V0wo{B*>q6UP8?-Mh>228-zBG%hH$4-1sf{tYGv#-- zvLRTYcDdp{lB2{bD{cTlh4%Q~M)^Sk>k(7FzLS4dKXdE3zML)Ij0hPAtp2R4#PTJD z)H&kcs+f%rA1;f3AKXZ2-Kid*F%%lOCLA@*^7W%?gzwPlN7-YF-Q2p$_lCj%sn?bY191vR8b-hohP7=VY zyli)yHSgD}L##Ejn0faXe%8KP284*TBa)Vr?AS@l&H7I%<0g=~sJa z^s|S1d%Nm`os4m??3r5Bp8`|5K70@TqSjsDFGuU z9Y!=&(BV<2gcfOHdtoYlS;?iv-zWaSWNeiL)-o^ZYPnO*tkliP@&%7Kv@WQvCmild zhf)#N_5OZ4+uhrRN&JeyD!@aG3M zy|Faf0oTogF^CfPhR2f#-B4#i_gicDFLBBSptkvouZRH+8sWX`b6S0@W3WYe^!4riMfc+!|vKIz#xW!$9vzNK23iaVOu;a3}p5J z7n_oAnIvNNF0Awka|AFR?CUn9YR|qhM!rRylBBRCli!(eJ(V_398=@@*9)jHCrN1w z3|A~~$R+8TCN|EXY=bLWNkJm$kT0=@ed}##!9_39Gw$UNLpCL2I4pB!bO!(mASq+! 
z_HKy0^mQ0X_~TE_N8h(#mkaoxb^CmETYgv z;bw>ZIf{VXoK1zD{qcS`J?_Jh&89qzKI^z?P!<;nV)@ z!Io3!fE5ibL7b$WHyQW1F=imUcnz1EE1exqrkyUm$M)!B-_7HWi1hBbt@jwUirS{v zu3Vde{yn_MJ5VX=4Pru;vm>7JNeq~z^9o$XPkjxTo;n!ow$+i9`{x!>0iRrZ{TA`j z)?s0y!#X`O&kqzTEBRW_qxw1EF#-kn19C$yhRD6LwT`0N z5){f1G1FPf#e1fcCFcY{!EC)eVeVrO%yx$u+(4+E{O*;a2E%3b+qX`o$KtBA4i)%W zS>{X0O)tyy;G_&_)gY?AC6)31%n6!K$U+sjoyaslesGh@Q5H0{gjpoP-=fcw%F;~5 zF6X~^nZV>%2?vv6lcDIXZ6p|O&jE{B3%MJ*&-U}z8)K4946piA3O9+%h?`E02=EXrgDw_DlQH4zO% zkzV1CBqqeK)d6M~%dtyqS6O=GMe6&=QG7xSkqvyvnQUj6R6I9@$lFP9)3!BbwbK;I zWrsUh+D^oK_JztKpm41lr{x91(;JjZC-mreOs0~TTT2&NbSfBYbKIuH9vn!YZw}_| z!M6i320h8bYEEYdss!7y=Ttz}6ZpUp8NEy8oAj_Y3Za`KIF9qZTZDh+;iJeNm`wU- z=i!$jnHs@2Z}I;YM`r+5B#@hK^XvYAUo-P{;qC{>wi{*dSWYCL#tN(gQYw%%3}1G@di!n$~6-x@7_ zHuRK7eiV7#N8;_nblQan=(Q7IDBfle1coB~!A#g!xiu*VlOxXkW8j%r2_v08y(`3v znNM2ME_uER+XRrKyS8?9e{6DVhhy9nXndQMsSZ{5rXCh0O{#Xxn}&9`f;pps^YIpl zX0WxzT3sR3lVJC-o;+a%#VOnHUucHZKV_KpO)7FR&KmuoPv5~Mx~(3|AQ<@SA} zoYd6jbv`*2i~US+1H7H|6WLY{e`@XgeoJxrWBZZjwsr13nKRQ{S&$mU`{JjLU8&wP z;xwv?HF*DYGemRiACLi-=SZeVeO3|a@smw$I)R!Lejz$d1L|=T$PsX-+NOZuvbhR$zq$DK zp$=Q52BV%_0u3!&7nXh_vhYeK%!t5HQu6`@lc`A>R2NuWKZ&XfvZwL;F-4B&SryG-&V1O?e zPj9sOSkPzkoGi@g;R!SDmnQ{QLT<(<(^gZQKUeF@Nfy_&L3eN8DS}`Z3h=%`=a+5e zdS(`a#M;?bW9O501J`5~lk8c^XXsCxdKcNHt#3tZj}y_@pRTHt8LEV$9^D50zs#PePFYyH+##b_do58gRM6zzL;Ngx*yWrc-k{*Y~$^womynF5q#s@`oMcMa-~{zu{(ipAcNRMxav{>Q>d-1`JTVR85)J@VC@ z*4xt`uHE4bT^Xs_iSslf`gf@6+D6+SH4= zSqU?6&6O}+ErVr#y}BQ{e;ArSW`X|y&9#j2f5x@^pK}>AGt+-Lmknvy#BGis{?zI< zKs*Gn(BQr1WU+zbY%*$>2^v{R|yffY#$juT#7{M9&zqq=Q3>1 zzHJgSN?Yy+Ra(+!7uyNmqkD6BbvPAnLwYA|SSeWCMx&=R$rKIM;*LRz%6?H5m*3~1 zw4|mCs?qB#uMupZ`l-v5E+}hqGN!nPS3spS%AePuD#f^g^^8-Ei8UuxsHV8a#P={c z*cz3{WBp;!YM+oIzcc5VouSXH>^B;ptWj+4{R z25N1XZjWS2pk%+VIjJgQM8szN*Z;1T3@c$l3_~lUG(fSUi(#!u_QrzN9657)mcHJk z$N*-sfxC!0B6@zO*bGuDSQm4zM0^irg^12&PN|BCNr4Mmj+bxG)#J3YPJ`-x}jSHB{tLnE>K85k53c!I9 zM9J^LYN-VmcsZ5~3W8iGIwAc~8AGA0uW<;?;Wm*;YUe8OP!u;7DChLNks!HDqd@!C zCi9sRCjUJJs)GRtC=cN&?ai>)7yuMS*gv~QNW?uRW?YW7Aug51001aK#4%e;Pn8g0 z^;2N-YcIfJeYhKbA6)A!dM0tuPxBH)4zj4%;2CJ&+#{TBO+IVGACe2<8DIZi;Ba=} zgS6}|p|F^MLQ}%20Lx#NzsD@`h%r#ct{2!s9$8ukC?<>1g0jXErYwkL8xoEH-gKlY z(uX*ZtZx>q;>ZdHiopYKu+R21tejYN4mF%&1=FYi>EdXt=8Cm=?RlXr`>bXL3->4@ zucFAe7}AvQA$i(kh&zuplJzxu(JSVTPFWCdN?s15-@zSecpq=V+DNN1k*->nj8sJk zi@zRayf=!Ml;}ZbKeEBdbzD;_4$mXsohCDD!h{oL>`c!$an?@fk%z@XBCW1#PM@!E z;RK`(QZ!ovpW>5_OdDLIom}S^uxQy2;Q@G%KN?sj&~KZvYAAhUm+7_=UX?Lwj=m*2LgTA-sn;Djye4k6shPr0E|=Ux)5hXmHUH(INb5|c{Pjt zSIMzi*T-e;W+eLRci~V5O%xs0 zqqN7#&J)|x^}4M3dm06N`jP{oh|ji`TBzzrJihO^daT)pN)&(BTv z`Hfd5tVfSq9)MnjYTQ~(hg;+6o2HMJGh6n{YYN@j++MH8tL0X+tao=8)k5Kr;il`g z?oC=>1K=$m2`;_(>sHZD1ijx})$dE1_K{0hvun$LWhA|Dpp6?0&{%=cTB$h|Mxfy= z+oQAC$7|Bi(_w8I_t(<4bUpgfH^Lr_NlHZc(W>2uOP$e@^T#S^#_Cay9h-x++H3j` zTL;|%oK#=E10syRKjsCIHnDHQMAjAb--^Lza=N|le4j5Z4mcHVGt%g|c1Dr@I=eIM zS?4C3zEt+HSVr~rJW$#>RYIhyo;pah{xSB?1dfRRUM2==n+Hswt4sBNK{8Iq_hWfY zeY>N!3FXlqMT=E)LivHUn&;5i+|HEjZIzKlcvSM6(5qTG(W!Iym&GQv15x%^Wof(k zg$6B#zqx_oHysyam)aausIhJ8c107@o~@U=^p7J>4?-uHco;Fc-(#W+#&FqgV;V%g zNzv&5e*lwR&5{}=ITJECho*6)Ma7pB^Ge%Bm2 zoBKv(@v+gi`6|Xf?5kCEl_WpXc9k%r@j|sM!X6sU-@2}T!8yC8fLZSwc5B!2KRNW9 zL|7S?Hn6?0oaG~6+6Lhz%6*tDXT&AUuNHE7$?1=i(3aNdGj$*=3Z~P8sAD!h{FdMi zi)Sb(ljIfReb}tzbbRuc=6V~B+e`7LZO_^KQ9*x13eW*kcxxW$5ke#k<;V1meo%jb zdNwv(pZqAlarHvuq$*S17l^)c)aCI@G|fpf?2dTuhf`@lM}xl@b5egI~A z55O~I+kG)dbag$FT}#hFcn~)oFmZ-qNCjBMp&l-QtZ>oBXJ`_sj_OVPYM4TnrCcInwB9&#&S>HH_yR?`i{c|$&`EgA z0gh(a^Tz~5q2*J|J9XZOM}=6aLi(M2RhU%Xu}voqvBKH>%Au-ppz_7jb7RJFVL2g~H+Dc8H{s>8t2u6vcKv!H@bZb#6NCHonr`K_F@7 
zqiBZTxMayc?=h1<603!;6o;$8h8h3krR(kDJR22msRbDU#N^2Pjf(o4%MM^@&(mNLSIvk#CE-`aUvl?)u~7fy!aN zUW@+s%^+&=WDpg;GXNi)AUZqzn7MSMSY{6B-lM-NBgUvh0YMH|kA(}( zlEmH+n)~a;0;iZ`%ODT1S1TM)DK;@W6Gs}lf+u5{Xary92>2+MUGgPWOu+b8_XYtW zNRuksvWSfEqVRa}HCPb-@cUj6&7~uVnhL>=e0*r?{SM)V=R{dw zzz|e8lZyy{X8dzF>!=RJkf^PhjM1~BWr`b0)6qj9E8OFW0q_wt1}bWCE|ay;2-<}j zo)Q3dBSpXxy#wq8*QbNuGN-2W&+_S6dXRN}m>JA!V zVL~FW#tC0Rg)V{8MO}bgyST%7-kF0m>){Ilsx!CDC{}~b$>G#1i8LhaAw0`9B$Vi3 zHXyeu*Y!L^YbiKMOF?~EWS0|HX5ux#`k+9xp?cU7QM@DBa1^d7C`L1IB_ISp)Fe(8 zkl1yGU7#!G1jD(UBml?611GTdS_>%^ywv(b0OSHBmhQsI?m(bMR@4=3CTJ1?7#NBr zOIgFp#8M^ztrZ9S(J%amF9T8a4M7J!?dKe}5mv&4>`pCUM~ReOO0W48DH2%61ylqL zvI0GDSF!2|FNq=LQ({H2f#z_znx1n-hDoPcl35{;D~(jfpzvO`86pmrP#kCD9YYVC zKl_6iM3$SDU=(y?cxeR2b`z7hodkfjRYzIB)2wKf&SZk&ci{d~P3xBFiG*1CBQ&?e zWtrfvLG*ulc=cF;422DdO}tHjK^aOa7^}4tzhEJTJQN>+aFML{@z0sQLVRXlB`wRM z5(Pg743&eQgYX70TzIP`O%(#&dxo$EB(3f9A!^HCmKmrNVS`--%+YT zUjQYGVs#6wIEw(6-N($|PkTwpm%Z9~x9g?-?C~gEtu4paEBDYyWjNd6vA9cqLPIsL z0sm%j***;4l+$!ykq=x+BTXnO+oyj?a02{iuD}eZXrllmTt)rAb?`5Z@``f5)|za)_b4xNx8IDfd# zH2tP<1|z?bI#U}mzyr}t;g+=k+(&zW)(Xz|*wwCiw=Sjuyi?lX)lRBcZ*mLSO~p8E z_HXdt`T!)#J7L`8$;?Qj8el}{vJHf3`z{l^q~!2CGQDXsIWuM+m8M6?QqkFnacH%I z9>rTKKmyjHQWrth6OR&87O;O3YKM&6C-OpxTsTcufip?vCg9wD$>86tSo>`K3Kn?{A#FQ;&ee74ujfuTPfQYr33P6b9S zh-uXDC7S~vB8&>2GE)I4X8!JGxox1hlxKkp#}>20Tq)S+6VtVOYGZQYRv{ZW389-QSwW@X)~nwm_ngioO^o!5HV z4vX4qRlTpPPs~3>xd}6gxSCs&rAT3wyZu+VAF(bzl$JK0NuKrHWYcD;m3>llG<3oI ztU`6t)~Lt?qh-C(Y&Gi`3QDL)vaF4oq@9h39uF-5v|-GRP=e=Pd@dV=R=1;A?_a1O zskw9o*W{AQymFc@6aqigB)HBPzC;OxP~-Jfz861r`T~s!q8n_lb>~WE0}%Re*!vGm zt$E_ON9*}Vi&C9!?M%n?sRWo&Hn|niW_~Bzf^hlj7KbYKMyUSWbP zUMreDx2T3p$D>K_6}=)%e1zp(2YQ#b!ioxHJ%Du|I)0ee$x1aER{e2Q-78<|KMv`w z(Y>?-L(lxab}&0V3t8y5h2qlucq`I^xvz7=Vf|&z0$UmOJgqQ ztSXb;xvBzAEtk;??{0-aX*?Lpc;^yOm>cEho1!CB*NY=3MtaXO61(_-uJ9hwnuin= zs6cavvqD?nZR?ipE?hpKa#1GyC6%VD2;a}D55@0(pg9Pq`A961es;S_B*OPm5m-xwp5SP;ly z;d(6rcNFr5XFz$QnfzH|FV{u+M2?7>dLEey6pQ?7Z4ibX2MnEv0y9l@z23s8svUo)KTW;mrmztU%VYnJ+n9 zy^@r;AFk@PUXobz5zTkwY%x-<_)1%(mBm(eL(anZ;og)Xjvc_P$y-e zBMKf^blkGa7s}bZW5$T~w9Y|^s^s>}1f$(*tlie?qqx1h&hu~ig8}zNOo`m=4+4>5 zL2&lcDf6U&)Gk=FpWmc zUhiR(5|>O{n1YWr$)NMkJez#u;5vEBr&qH8f+z>H$e=AXls(41+TG&>1GBr~E^C%l zreIKq(RB>j$)p=1cA;Hp;?3crA%?>aA;uED?-t}PcOnu1pqCE zkVtSufkqd3G+OQn-F0xpwH6TEBxOnMS}k!Cs}LLV2BK(E1y(k8E{+7PlDecMDe;Ka zcd=$y<^ulGf@tP0nP65ktFcim?`q#C{}BiGm=NOtYJ?`VOb{|#2zfuewQU=B$<8qB z+@_nKT6 zZ2!@5?cb$4%P?D~nxf>XYhIt5-C6sKpLpAQ1b$ySk*{<^XFK?_oJCL_5_w zJoJtVAHBdNLmIHy^?@*haB%jUR{P^c*6eBFCE&%Ty1+tz zHd04VeAOvag5TYkgiW-+^{}@9es`=z*!6vmLt&16OY5SoJa%h*>D_S|kef%p;7cWJ zZry}1=LwK&w|K1#@J};W$c(Hg{F~&tY=ac+-}gght|=A}f@85a3ga_7Em=+^w^7wBlcyc%W9s@#4w= zZ4G_h=wlQvOSSG(X2++Grv4^o>=j%%+w z%unD3^qE&MfwbDc>RXBQR`VoZpVUgeUl*P4lS%B}?reTz7{1+nomMb{>3V$TD`x0b z&l*h|%V**-mUeoh>8}Nn3IU_Gv+JBYBfyD0htV9WP<>pz=UV%-?8U6RIVklle-@(| zV00a)k6Dg?_v;Iy22TRT^bd;OuFq(;c_B) zX`*)Tl09+woaZ)!ZgB)}y77@nFde*3m5A$Ki#mV5vF~qlFZmGt_ z^zN8M8}KtbPtwF9{+2Tls-vaLA^m>jtg+=lNKVc`)SYQ9e1YQgd2_kX7u=x1&OzGHhb;b^ChHL(4nw1(CxUs8;nBXBiN;}=1dNy2@*mYP`m=SXce!Arr@pZS%Ef{=JeU3HUPp(8o`+8Y4@dSi|%%$$|R z-JO6H?ODbk2xg@pKgB;Axju^Ge|uvl`X46+j0h~TUvmC#ix>nZ)%CI)7zN7b1T=;$ z!@NcypayH-A3%g!z(xq-&MOij(F)d73}FZbYNYO%0NEp&dkdt*&8dhd`GQ&_TcV;u zQ0Kx6*{AVPyOQ-rz@JzS)HETJWP(y>vaX11b%p2uK$hAo;E5$kNd00J0sZZKFqVY= zIMkftBRn|W?t%?e$^Z-?FAV0+14xAw!npLkZ05ua{;B%R@oHe*nJ}Kv-#O8g+CBcH zX6yNZIzqq!uF|45bQw4|Fq97(3M)B_CD`;G!Ifi?0RK#}iHe!}kS*F|89mIi z7~`h;U@#;wyOOdMzY)zRi`uZ{T&FVyvzfxx>J+wCM5iTTf4obgGdhhYziMMW6w}&D zauMA>A;3M5#~AXwJGw`?04es+_x&hR)QH}h9kk5<=m#VdE+5{vnNju_fBi&)NhE|Oa)6oO^sRM8|`OyL0~PM{2d1(#$Hn*gTyg9z2|xgfiN 
zjXpVxvjGx@x9b%EGcm^Pq|js%aa9k?N0*XKp=2y&=#A=0HqF$yi0Pbax~yTwAr@EYkU%GdH3ikPu;2GCh<8JWXYZU!%PaZ70H#(y|s3Y`P-_EZBIbe2qm9~xa* zuD}bz8N_DZEFnP}jwrBbWG7dM4j6nyb$x7-jQidd4OcGDGoJr%|iLg z=P1?ZN#y5pl~a1Y?DIYP739&rSY#vfKuqbTu8>EIAjw3xG%yWMSuR-$4DNM7e*0*jl@1iWQml$uW+A)|1!qp<468sAyI12l z3|x$QR6m{`HgSJ9Ew7_V|UF>|PMR(#QQWDbdJDH>$Gbi~$qsK0=kx zjMDdXWrN6D7Hmt5RM8p6W5j#m$1u@HgND9R3N$y^*yvpBBuHEbg82bZi=jZ*nolq# zOkx+oQ~%-B((lI?j_9UgISdQB2O%0+iucEpKT{U zPE%zsLg8c;HJTk5i^!NW4L;u5^x`<^sipy(oBvd~b$y%~tVP+${X?~THCl#QSJzIC zXzTFqFQaz-^xs^(*sj5E`Xcyo+t`MEvvXit+VIwx`iylkJ;i$k zI6c4O&5$F>sUhm_DDS1?YPPAz#TDJDV%J8Kw+rwg?0*L#gm8ANXuCy>w&eGFrHLMj zJaa!kdx;1lY2ym@Y=^H29QXVbNpB(vkw=WmQ25f3ydm399`EWuH5ET#*n$Ec ztPRu})U-cq)~-S6eYcy3jT!B-bdN2b78*-B7l%8v@e_mJJFe*qd{{H+=lNj1;W&K) zNC9;icDH|6p2o(2{lBFYD|Kc%T9k(mecd&0WobOeWLSQvQmas<4xIP?5vKAWFCD6y z2Rh%l@aCSu@WzIg2e@h+r#MmnT4IuF=O!_ILHie)^-C^)Wn}k0vvGgBLZN=Y9f020 z8Y^}`ef;s_zp+g_gq_QJe=*g@kF|Ru+#aV#?I%W!67P^zx^EU?U*y=GJ(IH8WPNO@ zxwX45CW$`g#R7M82>ICSkFsk?i^;_HWF`!Q{)Z6xQ_jq1~;dM71aqn^vo1C!=ZjQF5y zIjOFEzU=t5@G^G#uX)fdShX`(+_nx6-o+bpdxW93-89po@Zw^0fMZmj^KodwKNfvH z3UqB7!)p{rR+9m6mUNXI@cE4FLT(U8QxulAG-xR0*AuT@!mkQHQQNt-n`hqmxaLxP zq%M8aM*MNiu|xNnH@t1J$(+BW$(t5TIlhuExR_gI8LhLPVry{=UBfY5E(XsN8j31Q zp&r{(m694_>&6@%>iSHrdpH)13$`qJiQxF`??IWH19m1J)m39Ufx$P$$ptgAW%~OY zwoqlslwJJ7eviF8gO5J_`;WoBz?u8}-~4tl{r~m(IREXMDY+Or|HpFif6Ny%3je?L zK4mpXbrkW%T$7xPn9)LR%xG>dV5~Tb9dL{;1QA41|3SihqA(oCh{(Wv5m99lDlC*P z1XP4}L{1R{Y!cmeI0?kvjG`Kl%}#-`~EMnpoxKIBqPU9zPtg+TJ z3g#3lvK>M)HbOcc7VDVf@cdbtQMtc4mNFw1haIE$+lRq74u-EBC0@Hx@X#D9Gs8$ZX#;o9YeozXq?1bvdaNLj9GTrJu82adb=7RO)ZEBpK^fL%bd zC}>>cy1b53&EbeY!1rwi?TZOBoD9+uDQkp*{+9|fhDk;3NB z+;@Dmr`_(;w<Kn=T633M(*ik@MDxZrgh`B9aL%A?qCYSw1=$E&4@WltvUm+*PqOyZ=fS+ul4sU4Bzm__ zUxg+@7S$smQec63Kiwzr*s~22BFPgP3I|Nmu_2QlS^;y1B+kilu7mlLDG;KIMU=_G zANeSy;OdX-N80H5M(MCdxj{VoYg~l4P1#10xa#sOX+=$HOL}bQpO8y)Dnn~P3|}(^ zw^`#s3U8|8>WjuC(wn5yIr;68KK5R!9rej}ZP{F^Z*=p4@cncjG1JqdAzDD>7H;|S z@3W*~^hl-EQtLV2f`i2i>B}U=_Q(6|ZJ)1QwG{${Aj8%#5nK&PnO+mday3x>#c7tX(V#Ewzya4&B5Jx125+^s2cb$$w%?;RfG;%r*>;+eI6jxQVu;tH7{Dy8Ht3fR zZ#TVmSa+rx*rOr=inOIdJ*kfxcc+uFY#XTiZ^f)obAIVwl(8&GNOeR^1XWBekCIn` z$nrkEmA~9qb)+Ii$FA)Dr(JA$clGkn0ttpR0pl_tzb{|4Wk9rt?%Kk>%AM0%qo2oM zyW@HjiDa{F&Vf>i)goat&2}k{D>QA8Mm-%ep*K9&;>*-kov3AimwaT0KY_}&a?^7^ z5O2+UE%ao4Qwl1?p#4VRJ;Z21pxHVO2whX2eW;%Nf|GRa3B`7>nk=roNz>J|EsCw(do?A0`P2razr`#Na}softmc2Q|3?Z0q!{jqs%4MPXmAiLRW z{Ypr^&uHdPS$cEk0NiM1d%YHVW35A`OwguMbaB1?`_-<*fzuE?SuXT2_Fr0YWH+p3 z6xb=q-ciKdt1iNAamJlvHb{6Cqq+JwA4Ox`zAnEzw>r>V_3Ordzwzc4=;arAZcFAI zq^%Ezt~MjJxR^qoH?`XLm(@=>8Az`igXJf@gF0gFG<3G%=t@n!G;6tK?dg8)l`7>G zk`U8LjYv#~D`$VPvj|%-Rauf)V8!Ye5iIw9ii-Oo~I+XmF%b_pB zrtI+>>-FE?ITAi+H)Lc551*AbuJZFBPFnGuCAXl#$Bxn6>D?gm(w`sgbUvZ*v;UPz z3dQdveJ`HW*?$!{`MbZK?OnXA_HY!SG0C#MK@&AG0ao7zBh0#05%Q~PVrwY4of@U1 z2X2ZdI1#dS%$i3JRCHV{O%`SG8s1o8z#j^-G!FHx|HzsB)i-@ZuJDz=T9!h?IZl|@ zHAVH}iDk#Q=GCyV?UIThy%6sY^^-`Jh#;DGj|nq+Amo?)@)wj33#4|bKmfBv80ePw zZ57C+fm3a?=n^Iah3~`wJ`Z6EE^wIQ)Hg3TwPY0bBV!h=4;|&ks0H@ySJ^P0uEPwB zhw~)n2tq=9)vO3e_!4e)q5~I^9iQR(7t&}unTzl-gL($ldQ^HC@-k3*ok6+teJ^0N z?|`X+dHtAR*IP~gGb)EmL%ZxWfpM^LmVEXG!pP-)?&=5l8jz2l5t#U7!>ah*GJ$~4 z``Q77e_F&QQY(#wtX93&uH9GE4OEf1wHx_jLBp&oQ)UANnDFoVeorf&Sz`Bf?p|YSSj?V{*-rW`Q zx^{-xsjQ8_M7^%tXDYR4H?>%K4TxUM%n15Oel8=c9ZKNO;gAnVR^T;;?jUy4E7cB4 zlXx%SjhEVAybh5%@#2mIQlE`jc`pDG6;_K|o?K3Y zS?%D*wnOTiM6*XZ)0ghsUflnM^!ere#00gV0xbdTIc9+f}KCDSr53!`ekc^{Ucb6cMf6l)px}w)Jy;JQ938jrgfJ^IT2HQX^Bj&~!b#+ap>t?wulzDyw z#HW!J*v>gA1ofspENM%$?!_NSdC(gXpnf0huz}D;Ebu}b(}S!QcBhDi((pBMHgO0^lsa@M zB}kwxC6uP5gHgt0Uvx{cR+>wOl~9RDZV;&ht$fq-CIA3G;7q!6SKPMIGYaT)o~R@6 
zuc5fzV>}Jl*uvM6CJs2t#ni`%2J3X>u?aOFm@|WD919>PrXYQxqxgQuNADe4A)5IH zPfrOi9ygoIv*^y))+WWJ=dypP)@@P$%WAvGHP7w1&jt^;7%vI}gP!Cgm(Rs%2;D`N+1V!&TS2Vhp6$#t6(NiKzFewzb3v)Lg_Ejgor~mQ0rrKUz&qKqA*Y^jZ00)i)7vcsM zA`3a1bI+ifvTt5OwAl`+uKNsZ1nr&PJ8Sr)iHJ`Rv5}r#Q(Lj?I8hLuY$LpN688;< z+pG1P)X%|0Wl}82e&5p7qQtr7>MrHdv+dC?lL5?}jB%#vU#C&7zg0a%Rp?CYP`ztO zfJ=dFA7P?8%XVkZ;%y-LLWP@=KxriSwj!S#nbWBH)pu9y7ExLQ+kRCA}?)5~X^ zD}({%BWT4`sO>`R!7izx38tfWx*?VXW~QxNN=-NZaS!IF@ID0oGgnL29L%QaMv<8S zIIWmPJ-;G;;^dY>VgjA|5IrxKXv#&ibwg+3rPwL*=4cm5DzcRvMXHR=I8^kEczw#G zC&L%0*VV_|5_UvshtGdmHeDsdk{V9^xcoH4J^B6jNkc7y~Vk5D_((q zNx96nCUxg_k6eUF>vaS`C;ZO=Rx(-mfdNTrFSg}z0|T8xu#tQG*0`hOh_x0bScA4O zcIUqSv-cUf!0&AVdpXIv;n^viQfUmU|NA95yB|QQ!_z4drTv{*(*;~$UIaF@-Gdck zH=Aw8=P3{#I)L+K6L(yIg@qIxFF7Zo@tCjmJW}uA4N!~Q0Px5@Vty`2NA2dYOWNV9HremFJjXxC?s|Ly6h$hB`u}TwP$xLs-RnEO0_v+Y_MxMiC&ELcCC~? zmAs`TrooUdn9_%G(XoVDBmg$Vds}5XvazLl?O;v~M4pFMjpkMSM6g0u&!Ywd3VKqSg26L+m*L9uiT<1Dx z&N-j!{W-mh(F_gT>M16z0ykT4^2c=d2H#aW)89_D=Hh(ev|as&q@^k}omx=ajbg=V zy^(^Y_~k7sb-wc>2uf=ezI%}nwfd*-SjppOxU9L1rPg}|Gbj+Mr?>Q+Kz-1DCWaig z&0zw47`Q*>+%1KW5S+Q!P+6th)4!;YS-Q&jr<$( zXmbFTQRE^9M{D+`m-nef1s>w}_ke$DjfqgKyD}N+lm0mQ^Ll54!3< z8XHWm%m47nrDpK+9VrvRmdA@%4t%G&}fUo5HG0w`{ET??9w0S7k`)fbip{&jm zUL3#9SO1b#Pc4}`6sz$}g+&Y`HCk}s&=VNCT1jAp+o$olNnWY#);H#FF`o^Cs)sP` zj_^k>PuP1aWP|m^AK4nQ#4&kL-$?V=s3Q+jr3zmjx3zA#saT*hE-xQGE8g*; zfq!A6*W4#o4rw=~FD9ExcTz3%<-X1a9~ufzdYc19w`7e@NPJ(JnYt4n8n(%w>Um^% zU9mHZ@>Fw`=Y*cf#?|2<(N8+h;4j+fidM8~dk@Y|d}Z$2e@xNnLJ*brcjQgMCWo@~ z6VJ|)TiCr7%4k1L_cG}nSH|1T?{vn_QIgq~ zn;*Nwr!-1VWHK+RHuhjW;%lG&vQJGLSl*a~ zaC(3bcML;1c#}$7(6ghUd6c*s=F7VbAK{N{z2z{a4Cx0g?3nkVjjyadNhTAMC$q~v z=Z6k3-w)ypP2=WqfNIxrL|x!63+tbqEzuJhuMjAhHShDzp3Fp*U1MzG zYc}^+QD*DL#xO=z*(m+1$7%V~FM~MfH#5tkl zf$QyLFb`;lgy^Qkqq>~|vNkXVspIe{kfbh@Eg_D>yMUo` za)1~Z7zPsqD8vZ9SR&j<3`^kM74k!l28w{dqdkad92T@K7w&{35tVp&whQ|C*{zw! zo`SFh@ts3{@c?3lKyw915NbPM!%@;1NQ(_hy1EK zKrOG1N5P3Wyg3esQP#r4-BAW`H$ZJDptU$u5&Tz@Kh34Mon?SSqMdy;;6#)%3<8CT zK_tbXFmsrMJXBU*QbGg*m4`rfh3ujHi;Nu&P`{A@k&}l=+3wL{m*_9-KOFow0{*8G ze{_Z&+W)rgk%*tB_aNahJI;wjfKeEfI|@rA0Op7OYJMa_-Wi8?hZB|I9v&Dp0>15> z+y0>l{xfS&ZGl80nutODaE@KK-Ek3B4Dr|0AHIV@Z}%Q~3>@pC#N#7|L^;Dr7$T3d zE|!SGG?jofj5SUJoz&eOjzxm>(O4A74DRlML4j2NR0(8306apQghrxv9TkKAwXogP zUv=22!(Y`^1ge18b-2Gs*!FWXl;^Lhe^z2w-|f-F1Bc(S(|^+BS3x_P{H4_YW`{E{ z_}T8i?Q-t!zcxG2Zww6NHE;;hwqNLKD3buo%A=rAX9)-#AtvGM>?9^Bfs_%GlR`*{ zA?0Kx;j&IBIfRqkPFMJqcaMAQ%GJdZh;S?d^+T?cw6lyP1PT>{IH9D)B%Pe4#pGmU zWW*rOPLgmLSy^cb2+*qkgZDpnj$P{kBQx-`0RH*A@ITsa*FXQu|NpG;e_8NeasL*w z*BXC&{ja(H)(w3pyNnTz(%l_d%b zEGNBzb>nZAt}sctKi96?3nndK5)6XLZU>o|f`3fsK|3p4Q?MbhBq4xc(%T?UQ?QW- z3ag6Po+sPEq#!#H0=|3Vp^FQB=_y!XFoy$2;}a=%*TZcY_BVf2{uu($jsr*b@(m(;lD;3u>9i(Z4tpfOU= zQ0BtNF^&26ykka7U5n*rA-Fo$kFxfel&`Pk@vnV>UYrYzpmdP`JokL@#llC~jiae^ zu8bYoS!J)()F)o4pR07y8rAAUuYHM~GjaPQ+^qlX;!0WIf#X>ZoQ=C0%bnNK-+Nsa z<>r6&O8tU5rl;p{z|z9!kuOWIp;KKWpXj<{uF5=KzjIHnOpiuW$>X7>pZ%8AM*PSE zTs}Ckc3o;nF@vkb`|9+xpLdQ(obp@6lh-+gH6{xq)Jje!hmBfIFP?X;92k7g$MfM% z!ns3dJ`fj4GYg!{L`C+A`S2u+5veYbWIu54w5X3X*4d?>BWKVQJ@8f*>8wKXdM~hF zrAsT%ruNutj!XB|L$&F<68f&ScMO&HO$M=(r#$rXQ5kcMsQQq#%+_cEy^0>{?&Kyn zEYY!0e{qlZ%!|7bO(fJH<<$N*Q<*vtH+8mGF0*>GlA}gqsChEv{#~q4WJ!JXF)ay% zmHd`H8N11#DGmLrPg!Ax{%dEbJX+Z7J|faN9byh7*t}0;_ej=1h|aQnUidWZOdVhS zr!Z=H)RlJ-H*uBF&E9uCisgKXNhIZ0J(T>XjB<*?t_3pm@s?=TGQ}*59Ba!F2upX0 zRcXMCKeC|9tLMl#bM&4ZHTxV{mtdnv206z;MO2H+<3NI@oGWwB+|8rqUk{jy#UR`4 zOp2z_5jkcrv;x$7WvV-len(?mRK|ltoXTC!d&-E!7_sTwn73E+tr6Eg>2hZ9xl1pp zu~Tztrge7+y}1;r#_si5^{|i~(-?2q6SB41e7!957TilGY}Lz!`X*nEtx(}w2jgb! 
z`+Uo$I~8AZP64M9opq5`{av?~+Os94g~kw3c(4`**cs43A+|f>AG=i2QZjox4#pgs zg5;)dUuSi23<#s}i&c7DFVm%8wrX^bc6pU5ooL3TAw+wse&vH^QKgA;aS0D;Ag@54 zek~xdXC$ao#<(wtvWV6InwoRynvzjvHW@TWa>JkN>#3B+H;%y%8ytcif@4^Dgdf*; z)I|Fi8EAdo&~0T1yHev1f7Wr(ef-D<5jkalvKaYZtM{JkRQ$R$Q|@a!%ohXJv>3Rp zi!(VA3Q`B_9D(=ptK z*KA(xl}DoO-uJP~d`^{o8}m$y&VtAN36#X>$>?>wL6qAC-2j9v^4Wlg5-EW zXSFGPbk3AgfoxtEg_yii^ruF(+Dj3yWUg+6b}abQ(M#QW_$ejG*1J`@DLQoaMhpDe zB^R;7GVMw)51BvPx70MGupYt25ZrsPqBW6t@1Vx;#G~%2UjGUzIj$&6SP46-u+1%N zJz#~$ii5#a-Gnmy97Fe3;0nca%2EZY3zI>o{Cm7#9sTMf8pQtMEAUii6^~aCKY~OiGRY{(*e>y1`K@37qxTC)%lzWk=H{vi zeB9LaeIgqw>|qq&VguE+F|*lIWm^w10x8`Uw|Lk>^|P>O5)pjg!rc_BvZO22%TlY` zhUF%k-MZ5yNM&W&-`OE{0=jX2r zRY1NPH{BFGUc`3Cpmh=^9ZMJaA=D$&BA07JAZ28Du0opeg>}(elfI|zMF)npuBQ|T z-eAmjup9{HN$78YJp1$4$(nz8MS53R`T) zL%1IJb$We}_h^!QAYPq45-4*o_t+IuzdPBf(i4=cZ&g_|T8yFZdJmH7g2vU)47k$@ zaoapP+}X%^JD4@VSnnxC$0zYi8Y?U*24e1WO;XyNELV+IIvqP6nuNcZpXpn^uaoU) z`b6Ue=>5HOt;G)-K2wZxvn^|e@INq5OFO66mex)rw4+m@L1f32IJKLeP6Tqvx>?h5okXcVG1n1%a;&I((lUxkwwRUDpgp3i%2Y~& zl9rmn>dplv7W2n6Di8F?XdvkClnzH+pQv7D`L^aME`G#Erf@urJ+jq! zwcSycA)PK zPF}X(`uaaQTTWW`m&wl=^V21Ka@239gabld*pDyCJ;}(<>lOVdHo2J|*O<1PZ^w^9 z@%2+NW(CGbPxDrw&t3_*+PI!kw0x%SLCI@-@0r6zcf>bJ?!B3Hvzw84&mDM6@-2m3 z?2y)XhrWk>v#?@chQ5{Wsu!2$k88dhp+0`s&MQ64+rkaz_>T1T=ss`5^Z833?pm%> zgntd`TaMUT>(4V1F#%jL6)g%>N81n6xfC{N%@=f7OUzvo2Sb ztZnM^r77ua?tV%!G|#GN0{K~n_o<1)7M0Etu5>!ammhG-Zp%~ZRQhO|YkDlJ z&^#kqj6$tvkv#hC)F*0Jwu$d5kyT3GS^krmWFM9;dW4M?slAi|%ON}y_YYdC8fvN0 zaowLRj&~m8|JNpb_&v^dGF$>Sh7yhrN{SoRz8MCK+EY$ZmPzX`yl61qO=l2Tp@LuRMPn~YUZtkM!o9$91m7&t+`;Ipzx%GSv94O+S<4^Yai-TX73WQU|nO#`m)cMhWqJYMThz9AYb3eRb@hzvuq*?cf@_rj0ML63oVTI}b};*R$vF;4_OmuWdZfC1hKIrJ1lvDG{q-aCtj*Xi2_6 zrrynZOu$ByPk2afV$HK5Pf)O{Q@ZDP`Z6_*LP4()zku#T)XkGdmX*#6Db}GUyPeYG z+_Rm}jKoxIZ7J7pI;gq6X(Zba&0etsAcaL$yvKj*^4qI{?U}!#BKSW{TjZd7r!D%x z@ctG;UBAxut+9}bSx1of&Ia%d=D=*}9KB z33sFJ+3?u4+uOesurwx@&%HcKMrq~Gh(95<%rZ38;nbxv-F>9yjedR?sl4s3$*FIu zEm&2(+#nz^&b=^SrZ6bg1J3)w6m%u6cnsWv&;TBF!SBd^e<^I|~$<;53ok1Hg-L2f;g z(1xiTatsAuI5HkN_}nekh5c?{-l{0;MGo7cC}rCQuBelN9SrAN3!}h(I?kIeDYTE$ zd_R1qe4dc<$&Y*Zh^V5+yG#ZgWXNNMj$3l}y1f{SN+v(Gx}nTtnGh-K&RdJ=G=Te-o_HLjOFRy4Nj(eXPUD4$8B=~$=uZK>F# z)n1=m(sA?GCsSGtj$3X5m9NOhd3UF2gsoU4KTA|4M9Q*rc_h!>%An#M8F`&bwUmGB zj+gPPGmqaOXiRUn$ys33oH=D{Hn$i?N#7fSfCKgkZ^W^`vMXTj!lO(Ify#)yrx5PV z$W?dc_oQs?;LBthn^fse;l+j-9CWZ8uVYZzaFKAt0LUt0HP%EhbtO*gbQ0Z>&{{WO{DtqzryL20RHLIU0FPYuLa)>c({6vTY#3^vh=CF{J?Lp=`D1HfBi;~ec-LPkUEqjrvx3ARBVq54$Cko@;so6V`1gq+4@j=Q~!jn*2f zb7Y&_KQMOWW!)}BeP&tis5#>Z2d=M;x)arfmwg+yo_`DcULSPk;hW-XjW=k)*;8|u z!zY9K=7;-KlnLNibrG4tlKKU;SNzA2Nh8RzT@|v*Eog)Jo52e&O-D&l>wK^lRpxR36Pp z3>afj_;}cS3tn*HkPxL1RW3gPfs2u(O;B) zo+MqBq3(PF6{YPT%2-;Y7-}xus{kYzc9n7&&S!1<`hwi`N`ECiWY9sqUf$B4;6O?`&=V_<9==# zCeQKyM(D#huGY+)o1bsBxLn;gzwdh2voZIF!3u^q*W+}EEu|zHFfrZ5$mB!8>G|{u zF;6|2a}2I_odA-#B{4(l?#GDW?vjOd%_Ba_-tv^7fX*V9sftQH1<=u~x|4^bbR0@9J_0Kg=<9VE0b&mJJo%YSi})cB;Ul?g|e*4C0Cv7jxrbZ zwUduoCyU&HqhID9o52hyw3^pl%GdlS#3z`N!sUggcK#0z0G%(HrR`~3Q`=EPhfMK^luYy1Fzbv;*mr6sG!ZPuQ~w`yZw~ zGID#TJxKHCaosA6^)IcyKvnVc^@{Oh3HIC$%p$R}AbQJQa^5#>Sc(m{VVxKK9d2MR ztNdHFb+s9d4%(&(*9C7@oAb-9bi7cxD^YovC838VE%SoCPJxN$#|UTCm-DjMLX3q^ zW0+Dz?@;U$aeG=$vu1zJ=z_ZOdzdIfzSX^6yN5jD=&icAn|PLW-=V7hr~``{7d1!G z58zyMzG4!qwJ#4mFvjTXnXc93h1pL}S;IFK-&2XXxex-2&Z>EJy@*mTFbx-9rg{Jz zlySk$gsRuX#hfQNOt6;XQVMuMcj$SI9-Ai@fn730jAJUn+*Mk+%Dk+_*a~8jjphZb z<&kB5k6AF^5KK94pV9V>-3f?4Qg&KkNthx@`3LV$ENnF=OP!^?4igio3q$xrt{!TY z{doC)KHd{IDH?PA%oFYM7*8$F`BNne0gR<0o@N$@d903SvoTjxBF1^O*ZNLN#}zuf zEncyn<6Mz?=93sLcaX`RJuLsbAEW3^ir2^VEQ4D{KY6&;qLxn!y+j?opT>*g5uALg zC+M6uC%SpFXGXyM;{0O$!JbCuE149@GoAK?+x 
z9(r-H+;Frgze#Z6ZB%KgYooRDx!anCH+cgnrun!wWS3U;^9E`Ogra8Ixfvc*VRF1` zHT!B5bDAO7SEmFKgGe0lhS@E-hhh!0=`Ma1qM~&z^ZfPIu3YI8s~_#l!;gOoIn^R7 zCZ^4Mhc5KHr%TrP?gZlfx~Y4-muPiwCKi;uDet&>g!;*1qI)zt*(`n&$!Y5zNoRYU z-hIg6b9XTCib%^uaQg4OU3&Xj!5@=+AWj8^#{wIZU=5TP8i6v^R{IYVf0&e<4A>AP z@n<)1eV38qZEoIoIP&EKZ@=spkXP(}iQq1%oOI*i6nkm%dDky^ca0lvQT6BDbg4~7R_<_W6<{+U(cWPCi)mmz<7T{5oZa}% z%)r9~ChfCGPP^vO46}hY40JWh$N$1*UZ-a5=6&pE4D+LKG1d~gsqJiwcAgkmWEwqL zG~d00vc*13=~1c84D>{%MLd~DUNu;D^59{Qa;j1Fn!G&cr5evgidym0@z`_jFS7jc zutZsk(Ckx*$xp{Crp1NZ3iKVM$zohKMcF;wr*nAhuG!kz9=pb#7G!dJt9DI>f9?~{ z1KPE>5>dUYk)KL8Oh1x2wnv|cy)f_2Ts1j}#G1uiP_E_yw@8aemuB8oPVis(6HEvzf7rmUot+&0=t zx24#;CA(0%WPR-E#fb$uMH7wNuFtCLWlidQF(o1CO}PZ$t%8eNkXa+!47V#n%b8?S z&!}~VvM9!8%;lwFMn5dHFMwh^%9SZYgalht#eEOflv158!C{}8PT zQ~$n|Fg;_+kE*<`_bY_C_9y0S+9i!MZ9}V ztshoU1}mmJ{`gQo&B}%F5h)LjjQvxMUl$CkDN4fQAAMTbw z^~f5o2pvhKYM0lCPLk!b5jZ76*dMn$-L>{DChw)6i9|W=>!+9vnuX5F&4Ro*)(>kp z7mT)h%vDo!FkFokX_Xr37~Sv5QteyM>TnPjzWTUgabS%(K&jziFy*PZOhFwMBDL3f zM>Pt#M0QxGTD69cj*liE^XrSf&15`Rt1nusDjL?Saqu_rwq5?BE1sv5aJe#(c%FEJ z82=?&U%)N#L>m*5+*h#XO2Czk)D6op9ys3!|D%+CArt;Caj4Lu8Z)?nf10nx5~f{# z26#m!T^m}sKIvXk46E6>aP#CI;R1Mp4LZ8ApfZzKPP|J@{9-Y!%U&FNFE;&#XwOy$V z(kC@f=OlL;c3yCuzbXXwv~98}N9aCJc*IY@N8{u095re+mNg3a`HAVz*v}U~*G(vl za|vg$eqp0vg|X3}JY$+_nRjyVWI5|WHY1^TR$<9sk|~mVg-=+1G`nVDX|iH$Xz{Kz zz{J-!B7gLDLY8Sh-V&vkoO7=9nT42{(yec?;-|fH1iM5&D1Bj<;+Ha#%C@~@`*5(n zHqI}~?}lH|H(t81{p!4yx%#d!wxaoY^7C3MOJQxcG?R1{^?$k zD|vAlaaRrF3@bX+?aNETvDdKI=iHTIIn}`?^C9mH0Xn@!~8=_r6Wo7xe-92kV z8?>#aorN8p#O*`DTF|j*slmYkzuaZ?jJI@(G(|d#^uzvRg93xn1BT*#HXSw&4+Gi_ zhvLvyZZb|r_v7!KLY^AUnrtNACislr9(n48@yPIaMbPt*pZwA^(402^c2aT5X{`L+ zM7_x9#}V5PgYyDohes`^SNiRTlsX;^1U`A(gt*@7YgBpb{uK5l9E<{|SmpM$(-yJK zSbgzHtPigjd62pGVSOR6HUK`%*5Dt&RnlYIW;ZZ$V55FxWliaG-=fjdmNFU3doE+P z1<6iHh)kJG6f-MZ59=3!emUTAp=uO13X-Q_4wHmcLW$z-QpJ)OWb$I+a#2a7$a%tJ z1>Q2IY$iIUilQOQd3z;JG?NXZT&0$aLGQI9H@04ZJ4!74>@BhK?3d!`W3bVijb9MG zP!~kKQz63HTD&;0cnYKK#qJ-CXceB&Zysz(d1ClvpykuW^l0xWaD3)ut%pbraBLaN zIp#^`@67Rh;(QVORRUuPxrs)LNS}-KH&~OCijzXk!+XMdu1Ka=K0r$Z;DXfUlNtmb zn5nRR*I%X~Mc75;vahgPu;V1uC5)VPT^7r~l`mI#BaOzZrve_(SH)G8C|HSk6Xrhn z-CC|Xo<#>eUMq7B(>^TQ(1z&ldU5{6#q{8(5^W+c{2%aRPFpqLH}f~{-r=-MKgdVB zw(s#9%>CCGZTBhc{4l%$u@z?&G27&(DWEx_$rWwW($@g{UN~A5as95s`f1VA^m9$K zqo0lqekxmcEg+%|r~5gFl|G@qlsA?4`RA@Uh@G``sE98!s>bCt-O3?6oZPfuKV|;J z>Rkc8(_Iy58ao{^@O8CcB4|cvSHQj zN#7+pS~^WSuK0zxSPnYH!EV>r{r!18=C2OP98A9zr^#9O_0o*Yb+HR#Rd*_TbH?6R zFRWGIavz?M9|_D^=Nm&l_qH9jxNkB@`zCv3b|o!tE$&LkLPw~Xh#|>)z^f_{_vQPQ z?sK<_I-cGTmOCw*9_%%8rlV$HsbK0BxY_>bOx(vH56{;vZG{m3z{7#=r>r+RdvOE9L61LbO&QFj+fzP2&_OSM$f0_| zZ|1Xm>6^a!;(7@PD~#wZi}x89U!NO)kmnDccX^u`TsF=94L-tKpgj?v%-Q$-_9|i~ zYT{{{NnclkpsEpP?A<3fwvslxBNsPrjo+GJDP*~n=$v@#*4tZHWxMZ~4d0JTU#G(| zZw8kHWv`Fco#hCgPPdVg*i6{MkMxaNeJ+iwcUYvo4L~M#P$}Ege_|v43H6WIscm?}KcN35l3a;rCk!0{M>$I( zkti{!BmxD5p-Vxc55=z)Uc3Ai%`zM!8Z zXVBZ2iL)qoClp{H54$(0Su%w2IT`r;)rkr0)Rt;T}fCMIG*H=fdiqDzzhqEas%T5J;4B;8i@i)$bmgc zI3kJwBmpQ@Iccy99`1z#L1D6BClY|TCW4W07Z)J*c=rqGgaHt^7_M-W{&L3P0Ks5qJbVXG0CpjvF*{;0C}-lIcX+gmD-rCD#*zqN4-}s0iX#z#$>OdW zfOsc30rm4{hjm9mAZM3z=kCw+J&8Mo?XVN^a3sndj&}n)qXE6Z`UK3jhDMrTvt3i! 
zAklykx9ecLn*bOLgCY>nV9YKl4h1Ic=J@S=fuPb5uqFwQ11==N2oip~34DPYX`l&k zZYZo18~}Ms%YuIjM&LYrca_5dU|bYX;cYZ9R0akHCb|GP8H2?U!Q#L_07DGM18)u> z0FO~nNU%E`fN)^JNR$g6g#vqENQ7Nm5xsE)5@0Yi4i6@}0(n30;0O}%@Wq`3g33y4 zn-q;hZa4joGz1C>;O60ApxuAc1Nykb2?)|Q!UN!u-OltR!SUM>=D?*Z9OJxO!XJqQ z5L6BZR^4$7u|Ywj{@?qSya$)&qXqq|F^ zyN5>iCygf26>PX$oY5}1(Jr~s9&)2U2!3+80}ySr2gVm{wA;8AyQ~(wtQLD%Eq?OY z>}Hy~;_z57;Q!lzZYK-@4BzF2?`FgI@B%Puphtn>KSjWI%m4?v=MOUgZ3=WQFlv_> zwTBsKrav4Ky-S1Mr9tnZLI0#d0cODN7Khs<$L*5i_K@TLAo$6JM0){{b1-h#2&7#` z(k>%u4p73SMBvx^F=Z*S2VWJ zKMkm*m#ZN{>1R815eU6x^vVWObygIu?KCJd0nc zKLV#?(++cxxH0plhp&eB<#;lcXEL|cO&9gmXYB~ZsK@y?H(~R4&K7O#pDA@4W8wGC zUmpOw9d}4Ez&{vd!t3YzJQ3)6X?>|c!|@VpvUu4wGsC6H(?Ede9dhf*-~5~caC$t^ Y2aj^5qm`0?N=wnv^7CsNY0=UCFV8VhPyhe` literal 0 HcmV?d00001 diff --git a/examples/cmdline/Makefile b/examples/cmdline/Makefile new file mode 100644 index 0000000000..0d63d190bd --- /dev/null +++ b/examples/cmdline/Makefile @@ -0,0 +1,52 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = cmdline + +# all source are stored in SRCS-y +SRCS-y := main.c commands.c parse_obj_list.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/cmdline/commands.c b/examples/cmdline/commands.c new file mode 100644 index 0000000000..5ce239cc0f --- /dev/null +++ b/examples/cmdline/commands.c @@ -0,0 +1,282 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef __linux__ +#include +#endif + +#include +#include +#include +#include +#include +#include + +#include + +#include "parse_obj_list.h" + +struct object_list global_obj_list; + +/* not defined under linux */ +#ifndef NIPQUAD +#define NIPQUAD_FMT "%u.%u.%u.%u" +#define NIPQUAD(addr) \ + (unsigned)((unsigned char *)&addr)[0], \ + (unsigned)((unsigned char *)&addr)[1], \ + (unsigned)((unsigned char *)&addr)[2], \ + (unsigned)((unsigned char *)&addr)[3] +#endif + +#ifndef NIP6 +#define NIP6_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x" +#define NIP6(addr) \ + (unsigned)((addr).s6_addr[0]), \ + (unsigned)((addr).s6_addr[1]), \ + (unsigned)((addr).s6_addr[2]), \ + (unsigned)((addr).s6_addr[3]), \ + (unsigned)((addr).s6_addr[4]), \ + (unsigned)((addr).s6_addr[5]), \ + (unsigned)((addr).s6_addr[6]), \ + (unsigned)((addr).s6_addr[7]), \ + (unsigned)((addr).s6_addr[8]), \ + (unsigned)((addr).s6_addr[9]), \ + (unsigned)((addr).s6_addr[10]), \ + (unsigned)((addr).s6_addr[11]), \ + (unsigned)((addr).s6_addr[12]), \ + (unsigned)((addr).s6_addr[13]), \ + (unsigned)((addr).s6_addr[14]), \ + (unsigned)((addr).s6_addr[15]) +#endif + + +/**********************************************************/ + +struct cmd_obj_del_show_result { + cmdline_fixed_string_t action; + struct object *obj; +}; + +static void cmd_obj_del_show_parsed(void *parsed_result, + struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_obj_del_show_result *res = parsed_result; + char ip_str[INET6_ADDRSTRLEN]; + + if (res->obj->ip.family == AF_INET) + rte_snprintf(ip_str, sizeof(ip_str), NIPQUAD_FMT, + NIPQUAD(res->obj->ip.addr.ipv4)); + else + rte_snprintf(ip_str, sizeof(ip_str), NIP6_FMT, + NIP6(res->obj->ip.addr.ipv6)); + + if (strcmp(res->action, "del") == 0) { + SLIST_REMOVE(&global_obj_list, res->obj, object, next); + cmdline_printf(cl, "Object %s removed, ip=%s\n", + res->obj->name, ip_str); + free(res->obj); + } + else if (strcmp(res->action, "show") == 0) { + cmdline_printf(cl, "Object %s, ip=%s\n", + res->obj->name, ip_str); + } +} + +cmdline_parse_token_string_t cmd_obj_action = + TOKEN_STRING_INITIALIZER(struct cmd_obj_del_show_result, + action, "show#del"); +parse_token_obj_list_t cmd_obj_obj = + TOKEN_OBJ_LIST_INITIALIZER(struct cmd_obj_del_show_result, obj, + &global_obj_list); + +cmdline_parse_inst_t cmd_obj_del_show = { + .f = cmd_obj_del_show_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "Show/del an object", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_obj_action, + (void *)&cmd_obj_obj, + NULL, + }, +}; + +/**********************************************************/ + +struct cmd_obj_add_result { + cmdline_fixed_string_t action; + cmdline_fixed_string_t name; + cmdline_ipaddr_t ip; +}; + +static void cmd_obj_add_parsed(void *parsed_result, + struct cmdline *cl, + __attribute__((unused)) void *data) +{ + struct cmd_obj_add_result *res = parsed_result; + struct object *o; + char ip_str[INET6_ADDRSTRLEN]; + + SLIST_FOREACH(o, &global_obj_list, next) { + if (!strcmp(res->name, o->name)) { + cmdline_printf(cl, "Object %s already exist\n", res->name); + return; + } + break; + } + + o = malloc(sizeof(*o)); + if (!o) { + cmdline_printf(cl, "mem error\n"); + return; + } + rte_snprintf(o->name, sizeof(o->name), "%s", res->name); + o->ip = res->ip; + SLIST_INSERT_HEAD(&global_obj_list, o, next); + + if (o->ip.family == AF_INET) + 
rte_snprintf(ip_str, sizeof(ip_str), NIPQUAD_FMT, + NIPQUAD(o->ip.addr.ipv4)); + else + rte_snprintf(ip_str, sizeof(ip_str), NIP6_FMT, + NIP6(o->ip.addr.ipv6)); + + cmdline_printf(cl, "Object %s added, ip=%s\n", + o->name, ip_str); +} + +cmdline_parse_token_string_t cmd_obj_action_add = + TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, action, "add"); +cmdline_parse_token_string_t cmd_obj_name = + TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, name, NULL); +cmdline_parse_token_ipaddr_t cmd_obj_ip = + TOKEN_IPADDR_INITIALIZER(struct cmd_obj_add_result, ip); + +cmdline_parse_inst_t cmd_obj_add = { + .f = cmd_obj_add_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "Add an object (name, val)", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_obj_action_add, + (void *)&cmd_obj_name, + (void *)&cmd_obj_ip, + NULL, + }, +}; + +/**********************************************************/ + +struct cmd_help_result { + cmdline_fixed_string_t help; +}; + +static void cmd_help_parsed(__attribute__((unused)) void *parsed_result, + struct cmdline *cl, + __attribute__((unused)) void *data) +{ + cmdline_printf(cl, + "Demo example of command line interface in RTE\n\n" + "This is a readline-like interface that can be used to\n" + "debug your RTE application. It supports some features\n" + "of GNU readline like completion, cut/paste, and some\n" + "other special bindings.\n\n" + "This demo shows how rte_cmdline library can be\n" + "extended to handle a list of objects. There are\n" + "3 commands:\n" + "- add obj_name IP\n" + "- del obj_name\n" + "- show obj_name\n\n"); +} + +cmdline_parse_token_string_t cmd_help_help = + TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help"); + +cmdline_parse_inst_t cmd_help = { + .f = cmd_help_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "show help", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_help_help, + NULL, + }, +}; + + +/**********************************************************/ +/**********************************************************/ +/****** CONTEXT (list of instruction) */ + +cmdline_parse_ctx_t main_ctx[] = { + (cmdline_parse_inst_t *)&cmd_obj_del_show, + (cmdline_parse_inst_t *)&cmd_obj_add, + (cmdline_parse_inst_t *)&cmd_help, + NULL, +}; + diff --git a/examples/cmdline/commands.h b/examples/cmdline/commands.h new file mode 100644 index 0000000000..b13a25b577 --- /dev/null +++ b/examples/cmdline/commands.h @@ -0,0 +1,41 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _COMMANDS_H_ +#define _COMMANDS_H_ + +extern cmdline_parse_ctx_t main_ctx[]; + +#endif /* _COMMANDS_H_ */ diff --git a/examples/cmdline/main.c b/examples/cmdline/main.c new file mode 100644 index 0000000000..c78c1cc343 --- /dev/null +++ b/examples/cmdline/main.c @@ -0,0 +1,100 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "commands.h" +#include "main.h" + +int MAIN(int argc, char **argv) +{ + int ret; + struct cmdline *cl; + + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_panic("Cannot init EAL\n"); + + cl = cmdline_stdin_new(main_ctx, "example> "); + if (cl == NULL) + rte_panic("Cannot create cmdline instance\n"); + cmdline_interact(cl); + cmdline_stdin_exit(cl); + + return 0; +} diff --git a/examples/cmdline/main.h b/examples/cmdline/main.h new file mode 100644 index 0000000000..6027cb5455 --- /dev/null +++ b/examples/cmdline/main.h @@ -0,0 +1,47 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/cmdline/parse_obj_list.c b/examples/cmdline/parse_obj_list.c new file mode 100644 index 0000000000..7aa9f9e501 --- /dev/null +++ b/examples/cmdline/parse_obj_list.c @@ -0,0 +1,164 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include "parse_obj_list.h" + +/* This file is an example of extension of libcmdline. It provides an + * example of objects stored in a list. */ + +struct cmdline_token_ops token_obj_list_ops = { + .parse = parse_obj_list, + .complete_get_nb = complete_get_nb_obj_list, + .complete_get_elt = complete_get_elt_obj_list, + .get_help = get_help_obj_list, +}; + +int +parse_obj_list(cmdline_parse_token_hdr_t *tk, const char *buf, void *res) +{ + struct token_obj_list *tk2 = (struct token_obj_list *)tk; + struct token_obj_list_data *tkd = &tk2->obj_list_data; + struct object *o; + unsigned int token_len = 0; + + if (*buf == 0) + return -1; + + while(!cmdline_isendoftoken(buf[token_len])) + token_len++; + + SLIST_FOREACH(o, tkd->list, next) { + if (token_len != strnlen(o->name, OBJ_NAME_LEN_MAX)) + continue; + if (strncmp(buf, o->name, token_len)) + continue; + break; + } + if (!o) /* not found */ + return -1; + + /* store the address of object in structure */ + if (res) + *(struct object **)res = o; + + return token_len; +} + +int complete_get_nb_obj_list(cmdline_parse_token_hdr_t *tk) +{ + struct token_obj_list *tk2 = (struct token_obj_list *)tk; + struct token_obj_list_data *tkd = &tk2->obj_list_data; + struct object *o; + int ret = 0; + + SLIST_FOREACH(o, tkd->list, next) { + ret ++; + } + return ret; +} + +int complete_get_elt_obj_list(cmdline_parse_token_hdr_t *tk, + int idx, char *dstbuf, unsigned int size) +{ + struct token_obj_list *tk2 = (struct token_obj_list *)tk; + struct token_obj_list_data *tkd = &tk2->obj_list_data; + struct object *o; + int i = 0; + unsigned len; + + SLIST_FOREACH(o, tkd->list, next) { + if (i++ == idx) + break; + } + if (!o) + return -1; + + len = strnlen(o->name, OBJ_NAME_LEN_MAX); + if ((len + 1) > size) + return -1; + + if (dstbuf) + rte_snprintf(dstbuf, size, "%s", o->name); + + return 0; +} + + +int get_help_obj_list(__attribute__((unused)) cmdline_parse_token_hdr_t *tk, + char *dstbuf, unsigned int size) +{ + rte_snprintf(dstbuf, size, "Obj-List"); + return 0; +} diff --git a/examples/cmdline/parse_obj_list.h b/examples/cmdline/parse_obj_list.h new file mode 100644 index 0000000000..eb25fd75d6 --- /dev/null +++ b/examples/cmdline/parse_obj_list.h @@ -0,0 +1,113 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _PARSE_OBJ_LIST_H_ +#define _PARSE_OBJ_LIST_H_ + +/* This file is an example of extension of libcmdline. It provides an + * example of objects stored in a list. 
*/
+
+#include <sys/queue.h>
+#include <cmdline_parse_ipaddr.h>
+
+#define OBJ_NAME_LEN_MAX 64
+
+struct object {
+	SLIST_ENTRY(object) next;
+	char name[OBJ_NAME_LEN_MAX];
+	cmdline_ipaddr_t ip;
+};
+
+/* define struct object_list */
+SLIST_HEAD(object_list, object);
+
+/* data is a pointer to a list */
+struct token_obj_list_data {
+	struct object_list *list;
+};
+
+struct token_obj_list {
+	struct cmdline_token_hdr hdr;
+	struct token_obj_list_data obj_list_data;
+};
+typedef struct token_obj_list parse_token_obj_list_t;
+
+extern struct cmdline_token_ops token_obj_list_ops;
+
+int parse_obj_list(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res);
+int complete_get_nb_obj_list(cmdline_parse_token_hdr_t *tk);
+int complete_get_elt_obj_list(cmdline_parse_token_hdr_t *tk, int idx,
+			      char *dstbuf, unsigned int size);
+int get_help_obj_list(cmdline_parse_token_hdr_t *tk, char *dstbuf, unsigned int size);
+
+#define TOKEN_OBJ_LIST_INITIALIZER(structure, field, obj_list_ptr)	\
+{									\
+	.hdr = {							\
+		.ops = &token_obj_list_ops,				\
+		.offset = offsetof(structure, field),			\
+	},								\
+	.obj_list_data = {						\
+		.list = obj_list_ptr,					\
+	},								\
+}
+
+#endif /* _PARSE_OBJ_LIST_H_ */
diff --git a/examples/dpdk_qat/497691_QuickAssist_in_DPDK_Env_Sample_App_Guide_Rev1.0.pdf b/examples/dpdk_qat/497691_QuickAssist_in_DPDK_Env_Sample_App_Guide_Rev1.0.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..3e841536ccc17354ed3f0e398490c812650ffe23
GIT binary patch
literal 99638
[99638 bytes of base85-encoded binary data for this PDF omitted]
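For context, a minimal sketch of how the obj_list token declared in parse_obj_list.[ch] above could be wired into a libcmdline command. This is illustrative only and not part of the patch; the names global_obj_list and cmd_obj_show_* are made up for the example, while the TOKEN_*_INITIALIZER macros and cmdline types are the standard libcmdline ones.

	/* illustrative sketch -- not part of the patch */
	#include <sys/queue.h>
	#include <cmdline.h>
	#include <cmdline_parse.h>
	#include <cmdline_parse_string.h>
	#include "parse_obj_list.h"

	/* hypothetical list that parse_obj_list() will search */
	static struct object_list global_obj_list =
		SLIST_HEAD_INITIALIZER(global_obj_list);

	struct cmd_obj_show_result {
		cmdline_fixed_string_t action;	/* the literal "show" keyword */
		struct object *obj;		/* filled in by parse_obj_list() */
	};

	/* callback invoked once both tokens have been parsed */
	static void
	cmd_obj_show_parsed(void *parsed_result, struct cmdline *cl,
			    __attribute__((unused)) void *data)
	{
		struct cmd_obj_show_result *res = parsed_result;
		cmdline_printf(cl, "object %s\n", res->obj->name);
	}

	static cmdline_parse_token_string_t cmd_obj_show_action =
		TOKEN_STRING_INITIALIZER(struct cmd_obj_show_result, action, "show");
	static parse_token_obj_list_t cmd_obj_show_obj =
		TOKEN_OBJ_LIST_INITIALIZER(struct cmd_obj_show_result, obj,
					   &global_obj_list);

	static cmdline_parse_inst_t cmd_obj_show = {
		.f = cmd_obj_show_parsed,
		.data = NULL,
		.help_str = "show an object from the list",
		.tokens = {
			(void *)&cmd_obj_show_action,
			(void *)&cmd_obj_show_obj,
			NULL,
		},
	};

The instance would then be listed in the cmdline_parse_ctx_t array handed to the command line, and objects added to global_obj_list with SLIST_INSERT_HEAD() become valid (and tab-completable) values for the second token, which is what the complete_get_nb/complete_get_elt callbacks above provide.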
zWX<%%_KS81kOu7QFYbMBLF9aEp`$nmu?sewX=h)qg(uLAR>I&-Q42O);(r=~ivgVg=7XfSgr>l{9&%>%i z3(irmtXRTUb4;O-tMqH{xe1iXUQJ5-oujs(@vx)aoCVmvPqfG4ENe*by@_+#jmw_k zCH#%pgw#N`SF!J;pMqGIr(RSe#rDhBVrQZab{ zmnsHS1dJ9j(142=L<9tp5QRwm6fr5)x>e0}S|Ue#C#C9}y}E6%_`;&=LpdPq6GC zGRSWthRnZF`gb1cr*QFal>ZY9F2ng7Np5F@a1gb(M+k#N?GVDE5GY94*1^_R7>aPP z6PEzPfZ{gL-=7YB{eME=f4^Q70*{3jpdx6^;*?qxXPXxs|Hu8R&WO=nwg8}ERhp#I<8pFevV|Kw-y5B?wnMAP>_6f3UyXmODVj?|;XtSuTuEMD9;^t4!;}?3P#_ep2!SiYU?4>?C0S_+ zF-37PMNxTW^bi6FCJKa$i7P9^AuvT{1t3frC@*UXP;+z`!{W!482!|3=#1)<-x1Ow2*-5O$E|A4lG8ZLO_!ZLQIpMuh#@aK7$E zUVH?6i4^-m$X7xk)?pm`Al55slx>a*2$g%iz|BHDBU^aRVYy*RS{BJ(G{GAPLtexQ4^$lw6@f_sFGtm7zwcY|rssO4!f3>@QsGIELkS08DbN?l^`ATuJ=By- zJGkoYy6W8t+YJjYkO}7k(iJkO?gOTWBT|d>yid}Sd{nw^;?mxvBd^F5)u`}9)UF`m zJtk4=S3jV)_O9DLTBk*`r%`A;U~&irp0a4}MXHuOvlioxv!BCiFnxoXxW(l!ScbRy zB;L>^8mPM#ie53CC_^dlF#{t5&YRBLM4XJIZx?KVgnkkY*kApb-&|di(_Itsyyj4K z;HiB=LeYQ4pb9RrKwuC6F?>JUGW{Z&&_bxU?>%pX11XV&1P}}%CF15*(orTQ`VUk| B599y< literal 0 HcmV?d00001 diff --git a/examples/dpdk_qat/Makefile b/examples/dpdk_qat/Makefile new file mode 100644 index 0000000000..f1b0cbb7e9 --- /dev/null +++ b/examples/dpdk_qat/Makefile @@ -0,0 +1,81 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
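+#
+# Illustrative build invocation (not part of the original Makefile; the
+# paths below are placeholders for wherever the DPDK SDK and the Intel
+# QAT software package are installed on the build host):
+#
+#   export RTE_SDK=/path/to/dpdk
+#   export ICP_ROOT=/path/to/qat_package
+#   export RTE_TARGET=x86_64-default-linuxapp-gcc
+#   make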
+# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +ifeq ($(ICP_ROOT),) +$(error "Please define ICP_ROOT environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp") +$(error This application can only operate in a linuxapp environment, \ +please change the definition of the RTE_TARGET environment variable) +endif + +ifneq ($(CONFIG_RTE_ARCH),"x86_64") +$(error This application can only operate in a x86_64 environment, \ +please change the definition of the RTE_TARGET environment variable) +endif + +# binary name +APP = dpdk_qat + +# all source are stored in SRCS-y +SRCS-y := main.c crypto.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(ICP_ROOT)/quickassist/include \ + -I$(ICP_ROOT)/quickassist/include/lac \ + -I$(ICP_ROOT)/quickassist/lookaside/access_layer/include + +LDLIBS += -L$(ICP_ROOT)/build +LDLIBS += $(ICP_ROOT)/build/icp_qa_al.a \ + -losal \ + -ladf_proxy \ + -lcrypto + +# workaround for a gcc bug with noreturn attribute +# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603 +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_main.o += -Wno-return-type +endif + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev0.conf b/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev0.conf new file mode 100644 index 0000000000..6949b43632 --- /dev/null +++ b/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev0.conf @@ -0,0 +1,537 @@ +######################################################################### +# +# @par +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 +######################################################################### +######################################################## +# +# This file is the configuration for a single dh89xxcc_qa +# device. +# +# Each device has up to two accelerators. +# - The client may load balance between these +# accelerators. +# Each accelerator has 8 independent ring banks. +# - The interrupt for each can be directed to a +# specific core. +# Each ring bank as 16 rings (hardware assisted queues). +# +######################################################## + +############################################## +# General Section +############################################## + +[GENERAL] +ServicesEnabled = cy0;cy1 + +# Look Aside Cryptographic Configuration +cyHmacAuthMode = 1 + +# Look Aside Compression Configuration +dcTotalSRAMAvailable = 0 +dcSRAMPerInstance = 0 + +# Firmware Location Configuration +Firmware_UofPath = uof_firmware.bin +Firmware_MmpPath = mmp_firmware.bin + +# QAT Parameters +Accel0AdminBankNumber = 0 +Accel0AcceleratorNumber = 0 +Accel0AdminTx = 0 +Accel0AdminRx = 1 + +Accel1AcceleratorNumber = 1 +Accel1AdminBankNumber = 0 +Accel1AdminTx = 0 +Accel1AdminRx = 1 + +#Statistics, valid values: 1,0 +statsGeneral = 1 +statsDc = 1 +statsDh = 1 +statsDrbg = 1 +statsDsa = 1 +statsEcc = 1 +statsKeyGen = 1 +statsLn = 1 +statsPrime = 1 +statsRsa = 1 +statsSym = 1 + +#Debug feature, if set to 1 it enables additional entries in /proc filesystem +ProcDebug = 1 + + +################################################ +# +# Hardware Access Ring Bank Configuration +# Each Accelerator has 8 ring banks (0-7) +# If the OS supports MSI-X, each ring bank has an +# steerable MSI-x interrupt which may be +# affinitized to a particular node/core. 
+# +################################################ + + +[Accelerator0] +Bank0InterruptCoalescingEnabled = 1 +Bank0InterruptCoalescingTimerNs = 10000 +Bank0CoreIDAffinity = 0 +Bank0InterruptCoalescingNumResponses = 0 + +Bank1InterruptCoalescingEnabled = 1 +Bank1InterruptCoalescingTimerNs = 10000 +Bank1CoreIDAffinity = 2 +Bank1InterruptCoalescingNumResponses = 0 + +Bank2InterruptCoalescingEnabled = 1 +Bank2InterruptCoalescingTimerNs = 10000 +Bank2CoreIDAffinity = 4 +Bank2InterruptCoalescingNumResponses = 0 + +Bank3InterruptCoalescingEnabled = 1 +Bank3InterruptCoalescingTimerNs = 10000 +Bank3CoreIDAffinity = 6 +Bank3InterruptCoalescingNumResponses = 0 + +Bank4InterruptCoalescingEnabled = 1 +Bank4InterruptCoalescingTimerNs = 10000 +Bank4CoreIDAffinity = 16 +Bank4InterruptCoalescingNumResponses = 0 + +Bank5InterruptCoalescingEnabled = 1 +Bank5InterruptCoalescingTimerNs = 10000 +Bank5CoreIDAffinity = 18 +Bank5InterruptCoalescingNumResponses = 0 + +Bank6InterruptCoalescingEnabled = 1 +Bank6InterruptCoalescingTimerNs = 10000 +Bank6CoreIDAffinity = 20 +Bank6InterruptCoalescingNumResponses = 0 + +Bank7InterruptCoalescingEnabled = 1 +Bank7InterruptCoalescingTimerNs = 10000 +Bank7CoreIDAffinity = 22 +Bank7InterruptCoalescingNumResponses = 0 + +[Accelerator1] +Bank0InterruptCoalescingEnabled = 1 +Bank0InterruptCoalescingTimerNs = 10000 +Bank0CoreIDAffinity = 1 +Bank0InterruptCoalescingNumResponses = 0 + +Bank1InterruptCoalescingEnabled = 1 +Bank1InterruptCoalescingTimerNs = 10000 +Bank1CoreIDAffinity = 3 +Bank1InterruptCoalescingNumResponses = 0 + +Bank2InterruptCoalescingEnabled = 1 +Bank2InterruptCoalescingTimerNs = 10000 +Bank2CoreIDAffinity = 5 +Bank2InterruptCoalescingNumResponses = 0 + +Bank3InterruptCoalescingEnabled = 1 +Bank3InterruptCoalescingTimerNs = 10000 +Bank3CoreIDAffinity = 7 +Bank3InterruptCoalescingNumResponses = 0 + +Bank4InterruptCoalescingEnabled = 1 +Bank4InterruptCoalescingTimerNs = 10000 +Bank4CoreIDAffinity = 17 +Bank4InterruptCoalescingNumResponses = 0 + +Bank5InterruptCoalescingEnabled = 1 +Bank5InterruptCoalescingTimerNs = 10000 +Bank5CoreIDAffinity = 19 +Bank5InterruptCoalescingNumResponses = 0 + +Bank6InterruptCoalescingEnabled = 1 +Bank6InterruptCoalescingTimerNs = 10000 +Bank6CoreIDAffinity = 21 +Bank6InterruptCoalescingNumResponses = 0 + +Bank7InterruptCoalescingEnabled = 1 +Bank7InterruptCoalescingTimerNs = 10000 +Bank7CoreIDAffinity = 23 +Bank7InterruptCoalescingNumResponses = 0 + +####################################################### +# +# Logical Instances Section +# A logical instance allows each address domain +# (kernel space and individual user space processes) +# to configure rings (i.e. hardware assisted queues) +# to be used by that address domain and to define the +# behavior of that ring. +# +# The address domains are in the following format +# - For kernel address domains +# [KERNEL] +# - For user process address domains +# [xxxxx] +# Where xxxxx may be any ascii value which uniquely identifies +# the user mode process. +# To allow the driver correctly configure the +# logical instances associated with this user process, +# the process must call the icp_sal_userStart(...) +# passing the xxxxx string during process initialisation. +# When the user space process is finish it must call +# icp_sal_userStop(...) to free resources. 
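+# (For example, the application in this patch calls
+# icp_sal_userStart("SSL") from crypto_init() in examples/dpdk_qat/crypto.c,
+# which is why the user process section below is named [SSL].)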
+# If there are multiple devices present in the system all conf
+# files that describe the devices must have the same address domain
+# sections even if the address domain does not configure any instances
+# on that particular device. So if icp_sal_userStart("xxxxx") is called
+# then user process address domain [xxxxx] needs to be present in all
+# conf files for all devices in the system.
+#
+# Items configurable by a logical instance are:
+# - Name of the logical instance
+# - The accelerator associated with this logical
+# instance
+# - The ring bank associated with this logical
+# instance.
+# - The response mode associated with this logical instance (0
+# for IRQ or 1 for polled).
+# - The ring for receiving and the ring for transmitting.
+# - The number of concurrent requests supported by a pair of
+# rings on this instance (tx + rx). Note: this number affects
+# the amount of memory allocated by the driver. Also
+# BankInterruptCoalescingNumResponses is only supported for
+# a number of concurrent requests equal to 512.
+#
+# Note: Logical instances may not share the same ring, but
+# may share a ring bank.
+#
+# The format of the logical instances is:
+# - For crypto:
+# CyName = "xxxx"
+# CyAcceleratorNumber = 0|1
+# CyBankNumber = 0-7
+# CyIsPolled = 0|1
+# CyNumConcurrentSymRequests = 64|128|256|512|1024|2048|4096
+# CyNumConcurrentAsymRequests = 64|128|256|512|1024|2048|4096
+# CyRingAsymTx = 0-15
+# CyRingAsymRx = 0-15
+# CyRingSymTxHi = 0-15
+# CyRingSymRxHi = 0-15
+# CyRingSymTxLo = 0-15
+# CyRingSymRxLo = 0-15
+#
+# - For Data Compression
+# DcName = "xxxx"
+# DcAcceleratorNumber = 0|1
+# DcBankNumber = 0-7
+# DcIsPolled = 0|1
+# DcNumConcurrentRequests = 64|128|256|512|1024|2048|4096
+# DcRingTx = 0-15
+# DcRingRx = 0-15
+#
+# Where:
+# - n is the number of this logical instance starting at 0.
+# - xxxx may be any ASCII value which identifies the logical instance.
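+#
+# Note: every crypto instance defined in the [SSL] section below is
+# configured as polled (CyIsPolled = 1); the sample application polls the
+# device for responses on its data path (see crypto.c) rather than relying
+# on the bank interrupts configured above.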
+# +######################################################## + +############################################## +# Kernel Instances Section +############################################## +[KERNEL] +NumberCyInstances = 0 +NumberDcInstances = 0 + + +############################################## +# User Process Instance Section +############################################## +[SSL] +NumberCyInstances = 16 +NumberDcInstances = 0 + +# Crypto - User instance #0 +Cy0Name = "SSL0" +Cy0IsPolled = 1 +Cy0AcceleratorNumber = 0 +Cy0ExecutionEngine = 0 +Cy0BankNumber = 0 +Cy0NumConcurrentSymRequests = 512 +Cy0NumConcurrentAsymRequests = 64 + +Cy0RingAsymTx = 2 +Cy0RingAsymRx = 3 +Cy0RingSymTxHi = 4 +Cy0RingSymRxHi = 5 +Cy0RingSymTxLo = 6 +Cy0RingSymRxLo = 7 + +# Crypto - User instance #1 +Cy1Name = "SSL1" +Cy1AcceleratorNumber = 1 +Cy1ExecutionEngine = 0 +Cy1BankNumber = 0 +Cy1IsPolled = 1 +Cy1NumConcurrentSymRequests = 512 +Cy1NumConcurrentAsymRequests = 64 + +Cy1RingAsymTx = 2 +Cy1RingAsymRx = 3 +Cy1RingSymTxHi = 4 +Cy1RingSymRxHi = 5 +Cy1RingSymTxLo = 6 +Cy1RingSymRxLo = 7 + +# Crypto - User instance #2 +Cy2Name = "SSL2" +Cy2IsPolled= 1 +Cy2AcceleratorNumber = 0 +Cy2ExecutionEngine = 1 +Cy2BankNumber = 1 +Cy2NumConcurrentSymRequests = 512 +Cy2NumConcurrentAsymRequests = 64 + +Cy2RingAsymTx = 0 +Cy2RingAsymRx = 1 +Cy2RingSymTxHi = 2 +Cy2RingSymRxHi = 3 +Cy2RingSymTxLo = 4 +Cy2RingSymRxLo = 5 + +# Crypto - User instance #3 +Cy3Name = "SSL3" +Cy3AcceleratorNumber = 1 +Cy3ExecutionEngine = 1 +Cy3BankNumber = 1 +Cy3IsPolled = 1 +Cy3NumConcurrentSymRequests = 512 +Cy3NumConcurrentAsymRequests = 64 + +Cy3RingAsymTx = 0 +Cy3RingAsymRx = 1 +Cy3RingSymTxHi = 2 +Cy3RingSymRxHi = 3 +Cy3RingSymTxLo = 4 +Cy3RingSymRxLo = 5 + + +# Crypto - User instance #4 +Cy4Name = "SSL4" +Cy4IsPolled= 1 +Cy4AcceleratorNumber = 0 +Cy4ExecutionEngine = 0 +Cy4BankNumber = 2 +Cy4NumConcurrentSymRequests = 512 +Cy4NumConcurrentAsymRequests = 64 + +Cy4RingAsymTx = 0 +Cy4RingAsymRx = 1 +Cy4RingSymTxHi = 2 +Cy4RingSymRxHi = 3 +Cy4RingSymTxLo = 4 +Cy4RingSymRxLo = 5 + +# Crypto - User instance #5 +Cy5Name = "SSL5" +Cy5AcceleratorNumber = 1 +Cy5ExecutionEngine = 0 +Cy5BankNumber = 2 +Cy5IsPolled = 1 +Cy5NumConcurrentSymRequests = 512 +Cy5NumConcurrentAsymRequests = 64 + +Cy5RingAsymTx = 0 +Cy5RingAsymRx = 1 +Cy5RingSymTxHi = 2 +Cy5RingSymRxHi = 3 +Cy5RingSymTxLo = 4 +Cy5RingSymRxLo = 5 + +# Crypto - User instance #6 +Cy6Name = "SSL6" +Cy6IsPolled = 1 +Cy6AcceleratorNumber = 0 +Cy6ExecutionEngine = 1 +Cy6BankNumber = 3 +Cy6NumConcurrentSymRequests = 512 +Cy6NumConcurrentAsymRequests = 64 + +Cy6RingAsymTx = 0 +Cy6RingAsymRx = 1 +Cy6RingSymTxHi = 2 +Cy6RingSymRxHi = 3 +Cy6RingSymTxLo = 4 +Cy6RingSymRxLo = 5 + +# Crypto - User instance #7 +Cy7Name = "SSL7" +Cy7AcceleratorNumber = 1 +Cy7ExecutionEngine = 1 +Cy7BankNumber = 3 +Cy7IsPolled = 1 +Cy7NumConcurrentSymRequests = 512 +Cy7NumConcurrentAsymRequests = 64 + +Cy7RingAsymTx = 0 +Cy7RingAsymRx = 1 +Cy7RingSymTxHi = 2 +Cy7RingSymRxHi = 3 +Cy7RingSymTxLo = 4 +Cy7RingSymRxLo = 5 + +# Crypto - User instance #8 +Cy8Name = "SSL8" +Cy8IsPolled = 1 +Cy8AcceleratorNumber = 0 +Cy8ExecutionEngine = 0 +Cy8BankNumber = 4 +Cy8NumConcurrentSymRequests = 512 +Cy8NumConcurrentAsymRequests = 64 + +Cy8RingAsymTx = 0 +Cy8RingAsymRx = 1 +Cy8RingSymTxHi = 2 +Cy8RingSymRxHi = 3 +Cy8RingSymTxLo = 4 +Cy8RingSymRxLo = 5 + +# Crypto - User instance #9 +Cy9Name = "SSL9" +Cy9IsPolled = 1 +Cy9AcceleratorNumber = 1 +Cy9ExecutionEngine = 0 +Cy9BankNumber = 4 +Cy9NumConcurrentSymRequests = 512 +Cy9NumConcurrentAsymRequests 
= 64 + +Cy9RingAsymTx = 0 +Cy9RingAsymRx = 1 +Cy9RingSymTxHi = 2 +Cy9RingSymRxHi = 3 +Cy9RingSymTxLo = 4 +Cy9RingSymRxLo = 5 + +# Crypto - User instance #10 +Cy10Name = "SSL10" +Cy10IsPolled = 1 +Cy10AcceleratorNumber = 0 +Cy10ExecutionEngine = 1 +Cy10BankNumber = 5 +Cy10NumConcurrentSymRequests = 512 +Cy10NumConcurrentAsymRequests = 64 + +Cy10RingAsymTx = 0 +Cy10RingAsymRx = 1 +Cy10RingSymTxHi = 2 +Cy10RingSymRxHi = 3 +Cy10RingSymTxLo = 4 +Cy10RingSymRxLo = 5 + +# Crypto - User instance #11 +Cy11Name = "SSL11" +Cy11IsPolled = 1 +Cy11AcceleratorNumber = 1 +Cy11ExecutionEngine = 1 +Cy11BankNumber = 5 +Cy11NumConcurrentSymRequests = 512 +Cy11NumConcurrentAsymRequests = 64 + +Cy11RingAsymTx = 0 +Cy11RingAsymRx = 1 +Cy11RingSymTxHi = 2 +Cy11RingSymRxHi = 3 +Cy11RingSymTxLo = 4 +Cy11RingSymRxLo = 5 + +# Crypto - User instance #12 +Cy12Name = "SSL12" +Cy12IsPolled = 1 +Cy12AcceleratorNumber = 0 +Cy12ExecutionEngine = 0 +Cy12BankNumber = 6 +Cy12NumConcurrentSymRequests = 512 +Cy12NumConcurrentAsymRequests = 64 + +Cy12RingAsymTx = 0 +Cy12RingAsymRx = 1 +Cy12RingSymTxHi = 2 +Cy12RingSymRxHi = 3 +Cy12RingSymTxLo = 4 +Cy12RingSymRxLo = 5 + +# Crypto - User instance #13 +Cy13Name = "SSL13" +Cy13IsPolled = 1 +Cy13AcceleratorNumber = 1 +Cy13ExecutionEngine = 0 +Cy13BankNumber = 6 +Cy13NumConcurrentSymRequests = 512 +Cy13NumConcurrentAsymRequests = 64 + +Cy13RingAsymTx = 0 +Cy13RingAsymRx = 1 +Cy13RingSymTxHi = 2 +Cy13RingSymRxHi = 3 +Cy13RingSymTxLo = 4 +Cy13RingSymRxLo = 5 + +# Crypto - User instance #14 +Cy14Name = "SSL14" +Cy14IsPolled = 1 +Cy14AcceleratorNumber = 0 +Cy14ExecutionEngine = 1 +Cy14BankNumber = 7 +Cy14NumConcurrentSymRequests = 512 +Cy14NumConcurrentAsymRequests = 64 + +Cy14RingAsymTx = 0 +Cy14RingAsymRx = 1 +Cy14RingSymTxHi = 2 +Cy14RingSymRxHi = 3 +Cy14RingSymTxLo = 4 +Cy14RingSymRxLo = 5 + +# Crypto - User instance #15 +Cy15Name = "SSL15" +Cy15IsPolled = 1 +Cy15AcceleratorNumber = 1 +Cy15ExecutionEngine = 1 +Cy15BankNumber = 7 +Cy15NumConcurrentSymRequests = 512 +Cy15NumConcurrentAsymRequests = 64 + +Cy15RingAsymTx = 0 +Cy15RingAsymRx = 1 +Cy15RingSymTxHi = 2 +Cy15RingSymRxHi = 3 +Cy15RingSymTxLo = 4 +Cy15RingSymRxLo = 5 diff --git a/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev1.conf b/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev1.conf new file mode 100644 index 0000000000..836de07c98 --- /dev/null +++ b/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev1.conf @@ -0,0 +1,537 @@ +######################################################################### +# +# @par +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 +######################################################################### +######################################################## +# +# This file is the configuration for a single dh89xxcc_qa +# device. +# +# Each device has up to two accelerators. +# - The client may load balance between these +# accelerators. +# Each accelerator has 8 independent ring banks. +# - The interrupt for each can be directed to a +# specific core. +# Each ring bank as 16 rings (hardware assisted queues). +# +######################################################## + +############################################## +# General Section +############################################## + +[GENERAL] +ServicesEnabled = cy0;cy1 + +# Look Aside Cryptographic Configuration +cyHmacAuthMode = 1 + +# Look Aside Compression Configuration +dcTotalSRAMAvailable = 0 +dcSRAMPerInstance = 0 + +# Firmware Location Configuration +Firmware_UofPath = uof_firmware.bin +Firmware_MmpPath = mmp_firmware.bin + +# QAT Parameters +Accel0AdminBankNumber = 0 +Accel0AcceleratorNumber = 0 +Accel0AdminTx = 0 +Accel0AdminRx = 1 + +Accel1AcceleratorNumber = 1 +Accel1AdminBankNumber = 0 +Accel1AdminTx = 0 +Accel1AdminRx = 1 + +#Statistics, valid values: 1,0 +statsGeneral = 1 +statsDc = 1 +statsDh = 1 +statsDrbg = 1 +statsDsa = 1 +statsEcc = 1 +statsKeyGen = 1 +statsLn = 1 +statsPrime = 1 +statsRsa = 1 +statsSym = 1 + +#Debug feature, if set to 1 it enables additional entries in /proc filesystem +ProcDebug = 1 + + +################################################ +# +# Hardware Access Ring Bank Configuration +# Each Accelerator has 8 ring banks (0-7) +# If the OS supports MSI-X, each ring bank has an +# steerable MSI-x interrupt which may be +# affinitized to a particular node/core. 
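+#
+# Note: this second device (dev1) affinitizes its ring banks to cores 8-15
+# and 24-31, complementing the dev0 configuration (cores 0-7 and 16-23), so
+# response processing for the two devices does not land on the same cores.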
+# +################################################ + + +[Accelerator0] +Bank0InterruptCoalescingEnabled = 1 +Bank0InterruptCoalescingTimerNs = 10000 +Bank0CoreIDAffinity = 8 +Bank0InterruptCoalescingNumResponses = 0 + +Bank1InterruptCoalescingEnabled = 1 +Bank1InterruptCoalescingTimerNs = 10000 +Bank1CoreIDAffinity = 10 +Bank1InterruptCoalescingNumResponses = 0 + +Bank2InterruptCoalescingEnabled = 1 +Bank2InterruptCoalescingTimerNs = 10000 +Bank2CoreIDAffinity = 12 +Bank2InterruptCoalescingNumResponses = 0 + +Bank3InterruptCoalescingEnabled = 1 +Bank3InterruptCoalescingTimerNs = 10000 +Bank3CoreIDAffinity = 14 +Bank3InterruptCoalescingNumResponses = 0 + +Bank4InterruptCoalescingEnabled = 1 +Bank4InterruptCoalescingTimerNs = 10000 +Bank4CoreIDAffinity = 24 +Bank4InterruptCoalescingNumResponses = 0 + +Bank5InterruptCoalescingEnabled = 1 +Bank5InterruptCoalescingTimerNs = 10000 +Bank5CoreIDAffinity = 26 +Bank5InterruptCoalescingNumResponses = 0 + +Bank6InterruptCoalescingEnabled = 1 +Bank6InterruptCoalescingTimerNs = 10000 +Bank6CoreIDAffinity = 28 +Bank6InterruptCoalescingNumResponses = 0 + +Bank7InterruptCoalescingEnabled = 1 +Bank7InterruptCoalescingTimerNs = 10000 +Bank7CoreIDAffinity = 30 +Bank7InterruptCoalescingNumResponses = 0 + +[Accelerator1] +Bank0InterruptCoalescingEnabled = 1 +Bank0InterruptCoalescingTimerNs = 10000 +Bank0CoreIDAffinity = 9 +Bank0InterruptCoalescingNumResponses = 0 + +Bank1InterruptCoalescingEnabled = 1 +Bank1InterruptCoalescingTimerNs = 10000 +Bank1CoreIDAffinity = 11 +Bank1InterruptCoalescingNumResponses = 0 + +Bank2InterruptCoalescingEnabled = 1 +Bank2InterruptCoalescingTimerNs = 10000 +Bank2CoreIDAffinity = 13 +Bank2InterruptCoalescingNumResponses = 0 + +Bank3InterruptCoalescingEnabled = 1 +Bank3InterruptCoalescingTimerNs = 10000 +Bank3CoreIDAffinity = 15 +Bank3InterruptCoalescingNumResponses = 0 + +Bank4InterruptCoalescingEnabled = 1 +Bank4InterruptCoalescingTimerNs = 10000 +Bank4CoreIDAffinity = 25 +Bank4InterruptCoalescingNumResponses = 0 + +Bank5InterruptCoalescingEnabled = 1 +Bank5InterruptCoalescingTimerNs = 10000 +Bank5CoreIDAffinity = 27 +Bank5InterruptCoalescingNumResponses = 0 + +Bank6InterruptCoalescingEnabled = 1 +Bank6InterruptCoalescingTimerNs = 10000 +Bank6CoreIDAffinity = 29 +Bank6InterruptCoalescingNumResponses = 0 + +Bank7InterruptCoalescingEnabled = 1 +Bank7InterruptCoalescingTimerNs = 10000 +Bank7CoreIDAffinity = 31 +Bank7InterruptCoalescingNumResponses = 0 + +####################################################### +# +# Logical Instances Section +# A logical instance allows each address domain +# (kernel space and individual user space processes) +# to configure rings (i.e. hardware assisted queues) +# to be used by that address domain and to define the +# behavior of that ring. +# +# The address domains are in the following format +# - For kernel address domains +# [KERNEL] +# - For user process address domains +# [xxxxx] +# Where xxxxx may be any ascii value which uniquely identifies +# the user mode process. +# To allow the driver correctly configure the +# logical instances associated with this user process, +# the process must call the icp_sal_userStart(...) +# passing the xxxxx string during process initialisation. +# When the user space process is finish it must call +# icp_sal_userStop(...) to free resources. 
+# If there are multiple devices present in the system all conf
+# files that describe the devices must have the same address domain
+# sections even if the address domain does not configure any instances
+# on that particular device. So if icp_sal_userStart("xxxxx") is called
+# then user process address domain [xxxxx] needs to be present in all
+# conf files for all devices in the system.
+#
+# Items configurable by a logical instance are:
+# - Name of the logical instance
+# - The accelerator associated with this logical
+# instance
+# - The ring bank associated with this logical
+# instance.
+# - The response mode associated with this logical instance (0
+# for IRQ or 1 for polled).
+# - The ring for receiving and the ring for transmitting.
+# - The number of concurrent requests supported by a pair of
+# rings on this instance (tx + rx). Note: this number affects
+# the amount of memory allocated by the driver. Also
+# BankInterruptCoalescingNumResponses is only supported for
+# a number of concurrent requests equal to 512.
+#
+# Note: Logical instances may not share the same ring, but
+# may share a ring bank.
+#
+# The format of the logical instances is:
+# - For crypto:
+# CyName = "xxxx"
+# CyAcceleratorNumber = 0|1
+# CyBankNumber = 0-7
+# CyIsPolled = 0|1
+# CyNumConcurrentSymRequests = 64|128|256|512|1024|2048|4096
+# CyNumConcurrentAsymRequests = 64|128|256|512|1024|2048|4096
+# CyRingAsymTx = 0-15
+# CyRingAsymRx = 0-15
+# CyRingSymTxHi = 0-15
+# CyRingSymRxHi = 0-15
+# CyRingSymTxLo = 0-15
+# CyRingSymRxLo = 0-15
+#
+# - For Data Compression
+# DcName = "xxxx"
+# DcAcceleratorNumber = 0|1
+# DcBankNumber = 0-7
+# DcIsPolled = 0|1
+# DcNumConcurrentRequests = 64|128|256|512|1024|2048|4096
+# DcRingTx = 0-15
+# DcRingRx = 0-15
+#
+# Where:
+# - n is the number of this logical instance starting at 0.
+# - xxxx may be any ASCII value which identifies the logical instance.
+# +######################################################## + +############################################## +# Kernel Instances Section +############################################## +[KERNEL] +NumberCyInstances = 0 +NumberDcInstances = 0 + + +############################################## +# User Process Instance Section +############################################## +[SSL] +NumberCyInstances = 16 +NumberDcInstances = 0 + +# Crypto - User instance #0 +Cy0Name = "SSL0" +Cy0IsPolled = 1 +Cy0AcceleratorNumber = 0 +Cy0ExecutionEngine = 0 +Cy0BankNumber = 0 +Cy0NumConcurrentSymRequests = 512 +Cy0NumConcurrentAsymRequests = 64 + +Cy0RingAsymTx = 2 +Cy0RingAsymRx = 3 +Cy0RingSymTxHi = 4 +Cy0RingSymRxHi = 5 +Cy0RingSymTxLo = 6 +Cy0RingSymRxLo = 7 + +# Crypto - User instance #1 +Cy1Name = "SSL1" +Cy1AcceleratorNumber = 1 +Cy1ExecutionEngine = 0 +Cy1BankNumber = 0 +Cy1IsPolled = 1 +Cy1NumConcurrentSymRequests = 512 +Cy1NumConcurrentAsymRequests = 64 + +Cy1RingAsymTx = 2 +Cy1RingAsymRx = 3 +Cy1RingSymTxHi = 4 +Cy1RingSymRxHi = 5 +Cy1RingSymTxLo = 6 +Cy1RingSymRxLo = 7 + +# Crypto - User instance #2 +Cy2Name = "SSL2" +Cy2IsPolled= 1 +Cy2AcceleratorNumber = 0 +Cy2ExecutionEngine = 1 +Cy2BankNumber = 1 +Cy2NumConcurrentSymRequests = 512 +Cy2NumConcurrentAsymRequests = 64 + +Cy2RingAsymTx = 0 +Cy2RingAsymRx = 1 +Cy2RingSymTxHi = 2 +Cy2RingSymRxHi = 3 +Cy2RingSymTxLo = 4 +Cy2RingSymRxLo = 5 + +# Crypto - User instance #3 +Cy3Name = "SSL3" +Cy3AcceleratorNumber = 1 +Cy3ExecutionEngine = 1 +Cy3BankNumber = 1 +Cy3IsPolled = 1 +Cy3NumConcurrentSymRequests = 512 +Cy3NumConcurrentAsymRequests = 64 + +Cy3RingAsymTx = 0 +Cy3RingAsymRx = 1 +Cy3RingSymTxHi = 2 +Cy3RingSymRxHi = 3 +Cy3RingSymTxLo = 4 +Cy3RingSymRxLo = 5 + + +# Crypto - User instance #4 +Cy4Name = "SSL4" +Cy4IsPolled= 1 +Cy4AcceleratorNumber = 0 +Cy4ExecutionEngine = 0 +Cy4BankNumber = 2 +Cy4NumConcurrentSymRequests = 512 +Cy4NumConcurrentAsymRequests = 64 + +Cy4RingAsymTx = 0 +Cy4RingAsymRx = 1 +Cy4RingSymTxHi = 2 +Cy4RingSymRxHi = 3 +Cy4RingSymTxLo = 4 +Cy4RingSymRxLo = 5 + +# Crypto - User instance #5 +Cy5Name = "SSL5" +Cy5AcceleratorNumber = 1 +Cy5ExecutionEngine = 0 +Cy5BankNumber = 2 +Cy5IsPolled = 1 +Cy5NumConcurrentSymRequests = 512 +Cy5NumConcurrentAsymRequests = 64 + +Cy5RingAsymTx = 0 +Cy5RingAsymRx = 1 +Cy5RingSymTxHi = 2 +Cy5RingSymRxHi = 3 +Cy5RingSymTxLo = 4 +Cy5RingSymRxLo = 5 + +# Crypto - User instance #6 +Cy6Name = "SSL6" +Cy6IsPolled = 1 +Cy6AcceleratorNumber = 0 +Cy6ExecutionEngine = 1 +Cy6BankNumber = 3 +Cy6NumConcurrentSymRequests = 512 +Cy6NumConcurrentAsymRequests = 64 + +Cy6RingAsymTx = 0 +Cy6RingAsymRx = 1 +Cy6RingSymTxHi = 2 +Cy6RingSymRxHi = 3 +Cy6RingSymTxLo = 4 +Cy6RingSymRxLo = 5 + +# Crypto - User instance #7 +Cy7Name = "SSL7" +Cy7AcceleratorNumber = 1 +Cy7ExecutionEngine = 1 +Cy7BankNumber = 3 +Cy7IsPolled = 1 +Cy7NumConcurrentSymRequests = 512 +Cy7NumConcurrentAsymRequests = 64 + +Cy7RingAsymTx = 0 +Cy7RingAsymRx = 1 +Cy7RingSymTxHi = 2 +Cy7RingSymRxHi = 3 +Cy7RingSymTxLo = 4 +Cy7RingSymRxLo = 5 + +# Crypto - User instance #8 +Cy8Name = "SSL8" +Cy8IsPolled = 1 +Cy8AcceleratorNumber = 0 +Cy8ExecutionEngine = 0 +Cy8BankNumber = 4 +Cy8NumConcurrentSymRequests = 512 +Cy8NumConcurrentAsymRequests = 64 + +Cy8RingAsymTx = 0 +Cy8RingAsymRx = 1 +Cy8RingSymTxHi = 2 +Cy8RingSymRxHi = 3 +Cy8RingSymTxLo = 4 +Cy8RingSymRxLo = 5 + +# Crypto - User instance #9 +Cy9Name = "SSL9" +Cy9IsPolled = 1 +Cy9AcceleratorNumber = 1 +Cy9ExecutionEngine = 0 +Cy9BankNumber = 4 +Cy9NumConcurrentSymRequests = 512 +Cy9NumConcurrentAsymRequests 
= 64 + +Cy9RingAsymTx = 0 +Cy9RingAsymRx = 1 +Cy9RingSymTxHi = 2 +Cy9RingSymRxHi = 3 +Cy9RingSymTxLo = 4 +Cy9RingSymRxLo = 5 + +# Crypto - User instance #10 +Cy10Name = "SSL10" +Cy10IsPolled = 1 +Cy10AcceleratorNumber = 0 +Cy10ExecutionEngine = 1 +Cy10BankNumber = 5 +Cy10NumConcurrentSymRequests = 512 +Cy10NumConcurrentAsymRequests = 64 + +Cy10RingAsymTx = 0 +Cy10RingAsymRx = 1 +Cy10RingSymTxHi = 2 +Cy10RingSymRxHi = 3 +Cy10RingSymTxLo = 4 +Cy10RingSymRxLo = 5 + +# Crypto - User instance #11 +Cy11Name = "SSL11" +Cy11IsPolled = 1 +Cy11AcceleratorNumber = 1 +Cy11ExecutionEngine = 1 +Cy11BankNumber = 5 +Cy11NumConcurrentSymRequests = 512 +Cy11NumConcurrentAsymRequests = 64 + +Cy11RingAsymTx = 0 +Cy11RingAsymRx = 1 +Cy11RingSymTxHi = 2 +Cy11RingSymRxHi = 3 +Cy11RingSymTxLo = 4 +Cy11RingSymRxLo = 5 + +# Crypto - User instance #12 +Cy12Name = "SSL12" +Cy12IsPolled = 1 +Cy12AcceleratorNumber = 0 +Cy12ExecutionEngine = 0 +Cy12BankNumber = 6 +Cy12NumConcurrentSymRequests = 512 +Cy12NumConcurrentAsymRequests = 64 + +Cy12RingAsymTx = 0 +Cy12RingAsymRx = 1 +Cy12RingSymTxHi = 2 +Cy12RingSymRxHi = 3 +Cy12RingSymTxLo = 4 +Cy12RingSymRxLo = 5 + +# Crypto - User instance #13 +Cy13Name = "SSL13" +Cy13IsPolled = 1 +Cy13AcceleratorNumber = 1 +Cy13ExecutionEngine = 0 +Cy13BankNumber = 6 +Cy13NumConcurrentSymRequests = 512 +Cy13NumConcurrentAsymRequests = 64 + +Cy13RingAsymTx = 0 +Cy13RingAsymRx = 1 +Cy13RingSymTxHi = 2 +Cy13RingSymRxHi = 3 +Cy13RingSymTxLo = 4 +Cy13RingSymRxLo = 5 + +# Crypto - User instance #14 +Cy14Name = "SSL14" +Cy14IsPolled = 1 +Cy14AcceleratorNumber = 0 +Cy14ExecutionEngine = 1 +Cy14BankNumber = 7 +Cy14NumConcurrentSymRequests = 512 +Cy14NumConcurrentAsymRequests = 64 + +Cy14RingAsymTx = 0 +Cy14RingAsymRx = 1 +Cy14RingSymTxHi = 2 +Cy14RingSymRxHi = 3 +Cy14RingSymTxLo = 4 +Cy14RingSymRxLo = 5 + +# Crypto - User instance #15 +Cy15Name = "SSL15" +Cy15IsPolled = 1 +Cy15AcceleratorNumber = 1 +Cy15ExecutionEngine = 1 +Cy15BankNumber = 7 +Cy15NumConcurrentSymRequests = 512 +Cy15NumConcurrentAsymRequests = 64 + +Cy15RingAsymTx = 0 +Cy15RingAsymRx = 1 +Cy15RingSymTxHi = 2 +Cy15RingSymRxHi = 3 +Cy15RingSymTxLo = 4 +Cy15RingSymRxLo = 5 diff --git a/examples/dpdk_qat/config_files/stargo_B0/dh89xxcc_qa_dev0.conf b/examples/dpdk_qat/config_files/stargo_B0/dh89xxcc_qa_dev0.conf new file mode 100644 index 0000000000..4639e4b0aa --- /dev/null +++ b/examples/dpdk_qat/config_files/stargo_B0/dh89xxcc_qa_dev0.conf @@ -0,0 +1,409 @@ +######################################################################### +# +# @par +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 +######################################################################### +######################################################## +# +# This file is the configuration for a single dh89xxcc_qa +# device. +# +# Each device has up to two accelerators. +# - The client may load balance between these +# accelerators. +# Each accelerator has 8 independent ring banks. +# - The interrupt for each can be directed to a +# specific core. +# Each ring bank as 16 rings (hardware assisted queues). +# +######################################################## + +############################################## +# General Section +############################################## + +[GENERAL] +ServicesEnabled = cy0;cy1 + +# Look Aside Cryptographic Configuration +cyHmacAuthMode = 1 + +# Look Aside Compression Configuration +dcTotalSRAMAvailable = 0 +dcSRAMPerInstance = 0 + +# Firmware Location Configuration +Firmware_UofPath = uof_firmware.bin +Firmware_MmpPath = mmp_firmware.bin + +# QAT Parameters +Accel0AdminBankNumber = 0 +Accel0AcceleratorNumber = 0 +Accel0AdminTx = 0 +Accel0AdminRx = 1 + +Accel1AcceleratorNumber = 1 +Accel1AdminBankNumber = 0 +Accel1AdminTx = 0 +Accel1AdminRx = 1 + +#Statistics, valid values: 1,0 +statsGeneral = 1 +statsDc = 1 +statsDh = 1 +statsDrbg = 1 +statsDsa = 1 +statsEcc = 1 +statsKeyGen = 1 +statsLn = 1 +statsPrime = 1 +statsRsa = 1 +statsSym = 1 + +#Debug feature, if set to 1 it enables additional entries in /proc filesystem +ProcDebug = 1 + + +################################################ +# +# Hardware Access Ring Bank Configuration +# Each Accelerator has 8 ring banks (0-7) +# If the OS supports MSI-X, each ring bank has an +# steerable MSI-x interrupt which may be +# affinitized to a particular node/core. 
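+#
+# Note: this Stargo configuration spreads its ring banks over cores 0-7
+# only, with the higher-numbered banks of each accelerator sharing core 7,
+# and its [SSL] section below defines 8 crypto instances rather than the 16
+# used in the Shumway configurations.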
+# +################################################ + + +[Accelerator0] +Bank0InterruptCoalescingEnabled = 1 +Bank0InterruptCoalescingTimerNs = 10000 +Bank0CoreIDAffinity = 0 +Bank0InterruptCoalescingNumResponses = 0 + +Bank1InterruptCoalescingEnabled = 1 +Bank1InterruptCoalescingTimerNs = 10000 +Bank1CoreIDAffinity = 2 +Bank1InterruptCoalescingNumResponses = 0 + +Bank2InterruptCoalescingEnabled = 1 +Bank2InterruptCoalescingTimerNs = 10000 +Bank2CoreIDAffinity = 4 +Bank2InterruptCoalescingNumResponses = 0 + +Bank3InterruptCoalescingEnabled = 1 +Bank3InterruptCoalescingTimerNs = 10000 +Bank3CoreIDAffinity = 6 +Bank3InterruptCoalescingNumResponses = 0 + +Bank4InterruptCoalescingEnabled = 1 +Bank4InterruptCoalescingTimerNs = 10000 +Bank4CoreIDAffinity = 7 +Bank4InterruptCoalescingNumResponses = 0 + +Bank5InterruptCoalescingEnabled = 1 +Bank5InterruptCoalescingTimerNs = 10000 +Bank5CoreIDAffinity = 7 +Bank5InterruptCoalescingNumResponses = 0 + +Bank6InterruptCoalescingEnabled = 1 +Bank6InterruptCoalescingTimerNs = 10000 +Bank6CoreIDAffinity = 7 +Bank6InterruptCoalescingNumResponses = 0 + +Bank7InterruptCoalescingEnabled = 1 +Bank7InterruptCoalescingTimerNs = 10000 +Bank7CoreIDAffinity = 7 +Bank7InterruptCoalescingNumResponses = 0 + +[Accelerator1] +Bank0InterruptCoalescingEnabled = 1 +Bank0InterruptCoalescingTimerNs = 10000 +Bank0CoreIDAffinity = 1 +Bank0InterruptCoalescingNumResponses = 0 + +Bank1InterruptCoalescingEnabled = 1 +Bank1InterruptCoalescingTimerNs = 10000 +Bank1CoreIDAffinity = 3 +Bank1InterruptCoalescingNumResponses = 0 + +Bank2InterruptCoalescingEnabled = 1 +Bank2InterruptCoalescingTimerNs = 10000 +Bank2CoreIDAffinity = 5 +Bank2InterruptCoalescingNumResponses = 0 + +Bank3InterruptCoalescingEnabled = 1 +Bank3InterruptCoalescingTimerNs = 10000 +Bank3CoreIDAffinity = 7 +Bank3InterruptCoalescingNumResponses = 0 + +Bank4InterruptCoalescingEnabled = 1 +Bank4InterruptCoalescingTimerNs = 10000 +Bank4CoreIDAffinity = 7 +Bank4InterruptCoalescingNumResponses = 0 + +Bank5InterruptCoalescingEnabled = 1 +Bank5InterruptCoalescingTimerNs = 10000 +Bank5CoreIDAffinity = 7 +Bank5InterruptCoalescingNumResponses = 0 + +Bank6InterruptCoalescingEnabled = 1 +Bank6InterruptCoalescingTimerNs = 10000 +Bank6CoreIDAffinity = 7 +Bank6InterruptCoalescingNumResponses = 0 + +Bank7InterruptCoalescingEnabled = 1 +Bank7InterruptCoalescingTimerNs = 10000 +Bank7CoreIDAffinity = 7 +Bank7InterruptCoalescingNumResponses = 0 + +####################################################### +# +# Logical Instances Section +# A logical instance allows each address domain +# (kernel space and individual user space processes) +# to configure rings (i.e. hardware assisted queues) +# to be used by that address domain and to define the +# behavior of that ring. +# +# The address domains are in the following format +# - For kernel address domains +# [KERNEL] +# - For user process address domains +# [xxxxx] +# Where xxxxx may be any ascii value which uniquely identifies +# the user mode process. +# To allow the driver correctly configure the +# logical instances associated with this user process, +# the process must call the icp_sal_userStart(...) +# passing the xxxxx string during process initialisation. +# When the user space process is finish it must call +# icp_sal_userStop(...) to free resources. +# If there are multiple devices present in the system all conf +# files that describe the devices must have the same address domain +# sections even if the address domain does not configure any instances +# on that particular device. 
So if icp_sal_userStart("xxxxx") is called +# then user process address domain [xxxxx] needs to be present in all +# conf files for all devices in the system. +# +# Items configurable by a logical instance are: +# - Name of the logical instance +# - The accelerator associated with this logical +# instance +# - The ring bank associated with this logical +# instance. +# - The response mode associated wth this logical instance (0 +# for IRQ or 1 for polled). +# - The ring for receiving and the ring for transmitting. +# - The number of concurrent requests supported by a pair of +# rings on this instance (tx + rx). Note this number affects +# the amount of memory allocated by the driver. Also +# BankInterruptCoalescingNumResponses is only supported for +# number of concurrent requests equal to 512. +# +# Note: Logical instances may not share the same ring, but +# may share a ring bank. +# +# The format of the logical instances are: +# - For crypto: +# CyName = "xxxx" +# CyAcceleratorNumber = 0|1 +# CyBankNumber = 0-7 +# CyIsPolled = 0|1 +# CyNumConcurrentSymRequests = 64|128|256|512|1024|2048|4096 +# CyNumConcurrentAsymRequests = 64|128|256|512|1024|2048|4096 +# CyRingAsymTx = 0-15 +# CyRingAsymRx = 0-15 +# CyRingSymTxHi = 0-15 +# CyRingSymRxHi = 0-15 +# CyRingSymRx = 0-15 +# +# - For Data Compression +# DcName = "xxxx" +# DcAcceleratorNumber = 0|1 +# DcBankNumber = 0-7 +# DcIsPolled = 0|1 +# DcNumConcurrentRequests = 64|128|256|512|1024|2048|4096 +# DcRingTx = 0-15 +# DcRingRx = 0-15 +# +# Where: +# - n is the number of this logical instance starting at 0. +# - xxxx may be any ascii value which identifies the logical instance. +# +######################################################## + +############################################## +# Kernel Instances Section +############################################## +[KERNEL] +NumberCyInstances = 0 +NumberDcInstances = 0 + + +############################################## +# User Process Instance Section +############################################## +[SSL] +NumberCyInstances = 8 +NumberDcInstances = 0 + +# Crypto - User instance #0 +Cy0Name = "SSL0" +Cy0IsPolled = 1 +Cy0AcceleratorNumber = 0 +Cy0ExecutionEngine = 0 +Cy0BankNumber = 0 +Cy0NumConcurrentSymRequests = 512 +Cy0NumConcurrentAsymRequests = 64 + +Cy0RingAsymTx = 2 +Cy0RingAsymRx = 3 +Cy0RingSymTxHi = 4 +Cy0RingSymRxHi = 5 +Cy0RingSymTxLo = 6 +Cy0RingSymRxLo = 7 + +# Crypto - User instance #1 +Cy1Name = "SSL1" +Cy1AcceleratorNumber = 1 +Cy1ExecutionEngine = 0 +Cy1BankNumber = 0 +Cy1IsPolled = 1 +Cy1NumConcurrentSymRequests = 512 +Cy1NumConcurrentAsymRequests = 64 + +Cy1RingAsymTx = 2 +Cy1RingAsymRx = 3 +Cy1RingSymTxHi = 4 +Cy1RingSymRxHi = 5 +Cy1RingSymTxLo = 6 +Cy1RingSymRxLo = 7 + +# Crypto - User instance #2 +Cy2Name = "SSL2" +Cy2IsPolled= 1 +Cy2AcceleratorNumber = 0 +Cy2ExecutionEngine = 1 +Cy2BankNumber = 1 +Cy2NumConcurrentSymRequests = 512 +Cy2NumConcurrentAsymRequests = 64 + +Cy2RingAsymTx = 0 +Cy2RingAsymRx = 1 +Cy2RingSymTxHi = 2 +Cy2RingSymRxHi = 3 +Cy2RingSymTxLo = 4 +Cy2RingSymRxLo = 5 + +# Crypto - User instance #3 +Cy3Name = "SSL3" +Cy3AcceleratorNumber = 1 +Cy3ExecutionEngine = 1 +Cy3BankNumber = 1 +Cy3IsPolled = 1 +Cy3NumConcurrentSymRequests = 512 +Cy3NumConcurrentAsymRequests = 64 + +Cy3RingAsymTx = 0 +Cy3RingAsymRx = 1 +Cy3RingSymTxHi = 2 +Cy3RingSymRxHi = 3 +Cy3RingSymTxLo = 4 +Cy3RingSymRxLo = 5 + + +# Crypto - User instance #4 +Cy4Name = "SSL4" +Cy4IsPolled= 1 +Cy4AcceleratorNumber = 0 +Cy4ExecutionEngine = 0 +Cy4BankNumber = 2 +Cy4NumConcurrentSymRequests = 512 
+Cy4NumConcurrentAsymRequests = 64 + +Cy4RingAsymTx = 0 +Cy4RingAsymRx = 1 +Cy4RingSymTxHi = 2 +Cy4RingSymRxHi = 3 +Cy4RingSymTxLo = 4 +Cy4RingSymRxLo = 5 + +# Crypto - User instance #5 +Cy5Name = "SSL5" +Cy5AcceleratorNumber = 1 +Cy5ExecutionEngine = 0 +Cy5BankNumber = 2 +Cy5IsPolled = 1 +Cy5NumConcurrentSymRequests = 512 +Cy5NumConcurrentAsymRequests = 64 + +Cy5RingAsymTx = 0 +Cy5RingAsymRx = 1 +Cy5RingSymTxHi = 2 +Cy5RingSymRxHi = 3 +Cy5RingSymTxLo = 4 +Cy5RingSymRxLo = 5 + +# Crypto - User instance #6 +Cy6Name = "SSL6" +Cy6IsPolled = 1 +Cy6AcceleratorNumber = 0 +Cy6ExecutionEngine = 1 +Cy6BankNumber = 3 +Cy6NumConcurrentSymRequests = 512 +Cy6NumConcurrentAsymRequests = 64 + +Cy6RingAsymTx = 0 +Cy6RingAsymRx = 1 +Cy6RingSymTxHi = 2 +Cy6RingSymRxHi = 3 +Cy6RingSymTxLo = 4 +Cy6RingSymRxLo = 5 + +# Crypto - User instance #7 +Cy7Name = "SSL7" +Cy7AcceleratorNumber = 1 +Cy7ExecutionEngine = 1 +Cy7BankNumber = 3 +Cy7IsPolled = 1 +Cy7NumConcurrentSymRequests = 512 +Cy7NumConcurrentAsymRequests = 64 + +Cy7RingAsymTx = 0 +Cy7RingAsymRx = 1 +Cy7RingSymTxHi = 2 +Cy7RingSymRxHi = 3 +Cy7RingSymTxLo = 4 +Cy7RingSymRxLo = 5 diff --git a/examples/dpdk_qat/crypto.c b/examples/dpdk_qat/crypto.c new file mode 100644 index 0000000000..99680be5ab --- /dev/null +++ b/examples/dpdk_qat/crypto.c @@ -0,0 +1,921 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CPA_CY_SYM_DP_TMP_WORKAROUND 1 + +#include "cpa.h" +#include "cpa_types.h" +#include "cpa_cy_sym_dp.h" +#include "cpa_cy_common.h" +#include "cpa_cy_im.h" +#include "icp_sal_user.h" +#include "icp_sal_poll.h" + +#include "crypto.h" + +#define NUM_HMAC (10) +#define NUM_CRYPTO (7) + + +/* CIPHER KEY LENGTHS */ +#define KEY_SIZE_64_IN_BYTES (64 / 8) +#define KEY_SIZE_56_IN_BYTES (56 / 8) +#define KEY_SIZE_128_IN_BYTES (128 / 8) +#define KEY_SIZE_168_IN_BYTES (168 / 8) +#define KEY_SIZE_192_IN_BYTES (192 / 8) +#define KEY_SIZE_256_IN_BYTES (256 / 8) + +/* HMAC AUTH KEY LENGTHS */ +#define AES_XCBC_AUTH_KEY_LENGTH_IN_BYTES (128 / 8) +#define SHA1_AUTH_KEY_LENGTH_IN_BYTES (160 / 8) +#define SHA224_AUTH_KEY_LENGTH_IN_BYTES (224 / 8) +#define SHA256_AUTH_KEY_LENGTH_IN_BYTES (256 / 8) +#define SHA384_AUTH_KEY_LENGTH_IN_BYTES (384 / 8) +#define SHA512_AUTH_KEY_LENGTH_IN_BYTES (512 / 8) +#define MD5_AUTH_KEY_LENGTH_IN_BYTES (128 / 8) + +/* HASH DIGEST LENGHTS */ +#define AES_XCBC_DIGEST_LENGTH_IN_BYTES (128 / 8) +#define AES_XCBC_96_DIGEST_LENGTH_IN_BYTES (96 / 8) +#define MD5_DIGEST_LENGTH_IN_BYTES (128 / 8) +#define SHA1_DIGEST_LENGTH_IN_BYTES (160 / 8) +#define SHA1_96_DIGEST_LENGTH_IN_BYTES (96 / 8) +#define SHA224_DIGEST_LENGTH_IN_BYTES (224 / 8) +#define SHA256_DIGEST_LENGTH_IN_BYTES (256 / 8) +#define SHA384_DIGEST_LENGTH_IN_BYTES (384 / 8) +#define SHA512_DIGEST_LENGTH_IN_BYTES (512 / 8) + +#define IV_LENGTH_16_BYTES (16) +#define IV_LENGTH_8_BYTES (8) + + +/* + * rte_memzone is used to allocate physically contiguous virtual memory. + * In this application we allocate a single block and divide between variables + * which require a virtual to physical mapping for use by the QAT driver. + * Virt2phys is only performed during initialisation and not on the data-path. + */ + +#define LCORE_MEMZONE_SIZE (1 << 22) + +struct lcore_memzone +{ + const struct rte_memzone *memzone; + void *next_free_address; +}; + +/* + * Size the qa software response queue. + * Note: Head and Tail are 8 bit, therefore, the queue is + * fixed to 256 entries. 
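+ * Because head and tail are uint8_t indexes, incrementing them wraps from
+ * 255 back to 0 automatically, so no explicit modulo arithmetic is needed
+ * as long as the ring is exactly 256 entries deep.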
+ */ +#define CRYPTO_SOFTWARE_QUEUE_SIZE 256 + +struct qa_callbackQueue { + uint8_t head; + uint8_t tail; + uint16_t numEntries; + struct rte_mbuf *qaCallbackRing[CRYPTO_SOFTWARE_QUEUE_SIZE]; +}; + +struct qa_core_conf { + CpaCySymDpSessionCtx *encryptSessionHandleTbl[NUM_CRYPTO][NUM_HMAC]; + CpaCySymDpSessionCtx *decryptSessionHandleTbl[NUM_CRYPTO][NUM_HMAC]; + CpaInstanceHandle instanceHandle; + struct qa_callbackQueue callbackQueue; + uint64_t qaOutstandingRequests; + uint64_t numResponseAttempts; + uint8_t kickFreq; + void *pPacketIV; + CpaPhysicalAddr packetIVPhy; + struct lcore_memzone lcoreMemzone; +} __rte_cache_aligned; + +#define MAX_CORES (RTE_MAX_LCORE) + +static struct qa_core_conf qaCoreConf[MAX_CORES]; + +/* + *Create maximum possible key size, + *One for cipher and one for hash + */ +struct glob_keys { + uint8_t cipher_key[32]; + uint8_t hash_key[64]; + uint8_t iv[16]; +}; + +struct glob_keys g_crypto_hash_keys = { + .cipher_key = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08, + 0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10, + 0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18, + 0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20}, + .hash_key = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08, + 0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10, + 0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18, + 0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20, + 0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28, + 0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f,0x30, + 0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38, + 0x39,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f,0x50}, + .iv = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08, + 0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10} +}; + +/* + * Offsets from the start of the packet. + * + */ +#define PACKET_DATA_START_PHYS(p) \ + ((p)->buf_physaddr + ((char *)p->pkt.data - (char *)p->buf_addr)) + +/* + * A fixed offset to where the crypto is to be performed, which is the first + * byte after the Ethernet(14 bytes) and IPv4 headers(20 bytes) + */ +#define CRYPTO_START_OFFSET (14+20) +#define HASH_START_OFFSET (14+20) +#define CIPHER_BLOCK_DEFAULT_SIZE (16) +#define HASH_BLOCK_DEFAULT_SIZE (16) + +/* + * Offset to the opdata from the start of the data portion of packet. + * Assumption: The buffer is physically contiguous. + * +18 takes this to the next cache line. + */ + +#define CRYPTO_OFFSET_TO_OPDATA (ETHER_MAX_LEN+18) + +/* + * Default number of requests to place on the hardware ring before kicking the + * ring pointers. + */ +#define CRYPTO_BURST_TX (16) + +/* + * Only call the qa poll function when the number responses in the software + * queue drops below this number. + */ +#define CRYPTO_QUEUED_RESP_POLL_THRESHOLD (32) + +/* + * Limit the number of polls per call to get_next_response. + */ +#define GET_NEXT_RESPONSE_FREQ (32) + +/* + * Max number of responses to pull from the qa in one poll. + */ +#define CRYPTO_MAX_RESPONSE_QUOTA \ + (CRYPTO_SOFTWARE_QUEUE_SIZE-CRYPTO_QUEUED_RESP_POLL_THRESHOLD-1) + +#if (CRYPTO_QUEUED_RESP_POLL_THRESHOLD + CRYPTO_MAX_RESPONSE_QUOTA >= \ + CRYPTO_SOFTWARE_QUEUE_SIZE) +#error Its possible to overflow the qa response Q with current poll and \ + response quota. +#endif + +static void +crypto_callback(CpaCySymDpOpData *pOpData, + __rte_unused CpaStatus status, + __rte_unused CpaBoolean verifyResult) +{ + uint32_t lcore_id; + lcore_id = rte_lcore_id(); + struct qa_callbackQueue *callbackQ = &(qaCoreConf[lcore_id].callbackQueue); + + /* + * Received a completion from the QA hardware. + * Place the response on the return queue. 
+ */ + callbackQ->qaCallbackRing[callbackQ->head] = pOpData->pCallbackTag; + callbackQ->head++; + callbackQ->numEntries++; + qaCoreConf[lcore_id].qaOutstandingRequests--; +} + +static void +qa_crypto_callback(CpaCySymDpOpData *pOpData, CpaStatus status, + CpaBoolean verifyResult) +{ + crypto_callback(pOpData, status, verifyResult); +} + +/* + * Each allocation from a particular memzone lasts for the life-time of + * the application. No freeing of previous allocations will occur. + */ +static void * +alloc_memzone_region(uint32_t length, uint32_t lcore_id) +{ + char *current_free_addr_ptr = NULL; + struct lcore_memzone *lcore_memzone = &(qaCoreConf[lcore_id].lcoreMemzone); + + current_free_addr_ptr = lcore_memzone->next_free_address; + + if (current_free_addr_ptr + length >= + (char *)lcore_memzone->memzone->addr + lcore_memzone->memzone->len) { + printf("Crypto: No memory available in memzone\n"); + return NULL; + } + lcore_memzone->next_free_address = current_free_addr_ptr + length; + + return (void *)current_free_addr_ptr; +} + +/* + * Virtual to Physical Address translation is only executed during initialization + * and not on the data-path. + */ +static CpaPhysicalAddr +qa_v2p(void *ptr) +{ + const struct rte_memzone *memzone = NULL; + uint32_t lcore_id = 0; + RTE_LCORE_FOREACH(lcore_id) { + memzone = qaCoreConf[lcore_id].lcoreMemzone.memzone; + + if ((char*) ptr >= (char *) memzone->addr && + (char*) ptr < ((char*) memzone->addr + memzone->len)) { + return (CpaPhysicalAddr) + (memzone->phys_addr + ((char *) ptr - (char*) memzone->addr)); + } + } + printf("Crypto: Corresponding physical address not found in memzone\n"); + return (CpaPhysicalAddr) 0; +} + +static CpaStatus +getCoreAffinity(Cpa32U *coreAffinity, const CpaInstanceHandle instanceHandle) +{ + CpaInstanceInfo2 info; + Cpa16U i = 0; + CpaStatus status = CPA_STATUS_SUCCESS; + + bzero(&info, sizeof(CpaInstanceInfo2)); + + status = cpaCyInstanceGetInfo2(instanceHandle, &info); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: Error getting instance info\n"); + return CPA_STATUS_FAIL; + } + for (i = 0; i < MAX_CORES; i++) { + if (CPA_BITMAP_BIT_TEST(info.coreAffinity, i)) { + *coreAffinity = i; + return CPA_STATUS_SUCCESS; + } + } + return CPA_STATUS_FAIL; +} + +static CpaStatus +get_crypto_instance_on_core(CpaInstanceHandle *pInstanceHandle, + uint32_t lcore_id) +{ + Cpa16U numInstances = 0, i = 0; + CpaStatus status = CPA_STATUS_FAIL; + CpaInstanceHandle *pLocalInstanceHandles = NULL; + Cpa32U coreAffinity = 0; + + status = cpaCyGetNumInstances(&numInstances); + if (CPA_STATUS_SUCCESS != status || numInstances == 0) { + return CPA_STATUS_FAIL; + } + + pLocalInstanceHandles = rte_malloc("pLocalInstanceHandles", + sizeof(CpaInstanceHandle) * numInstances, CACHE_LINE_SIZE); + + if (NULL == pLocalInstanceHandles) { + return CPA_STATUS_FAIL; + } + status = cpaCyGetInstances(numInstances, pLocalInstanceHandles); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: cpaCyGetInstances failed with status: %"PRId32"\n", status); + rte_free((void *) pLocalInstanceHandles); + return CPA_STATUS_FAIL; + } + + for (i = 0; i < numInstances; i++) { + status = getCoreAffinity(&coreAffinity, pLocalInstanceHandles[i]); + if (CPA_STATUS_SUCCESS != status) { + rte_free((void *) pLocalInstanceHandles); + return CPA_STATUS_FAIL; + } + if (coreAffinity == lcore_id) { + printf("Crypto: instance found on core %d\n", i); + *pInstanceHandle = pLocalInstanceHandles[i]; + return CPA_STATUS_SUCCESS; + } + } + /* core affinity not found */ + 
rte_free((void *) pLocalInstanceHandles); + return CPA_STATUS_FAIL; +} + +static CpaStatus +initCySymSession(const int pkt_cipher_alg, + const int pkt_hash_alg, const CpaCySymHashMode hashMode, + const CpaCySymCipherDirection crypto_direction, + CpaCySymSessionCtx **ppSessionCtx, + const CpaInstanceHandle cyInstanceHandle, + const uint32_t lcore_id) +{ + Cpa32U sessionCtxSizeInBytes = 0; + CpaStatus status = CPA_STATUS_FAIL; + CpaBoolean isCrypto = CPA_TRUE, isHmac = CPA_TRUE; + CpaCySymSessionSetupData sessionSetupData; + + bzero(&sessionSetupData, sizeof(CpaCySymSessionSetupData)); + + /* Assumption: key length is set to each algorithm's max length */ + switch (pkt_cipher_alg) { + case NO_CIPHER: + isCrypto = CPA_FALSE; + break; + case CIPHER_DES: + sessionSetupData.cipherSetupData.cipherAlgorithm = + CPA_CY_SYM_CIPHER_DES_ECB; + sessionSetupData.cipherSetupData.cipherKeyLenInBytes = + KEY_SIZE_64_IN_BYTES; + break; + case CIPHER_DES_CBC: + sessionSetupData.cipherSetupData.cipherAlgorithm = + CPA_CY_SYM_CIPHER_DES_CBC; + sessionSetupData.cipherSetupData.cipherKeyLenInBytes = + KEY_SIZE_64_IN_BYTES; + break; + case CIPHER_DES3: + sessionSetupData.cipherSetupData.cipherAlgorithm = + CPA_CY_SYM_CIPHER_3DES_ECB; + sessionSetupData.cipherSetupData.cipherKeyLenInBytes = + KEY_SIZE_192_IN_BYTES; + break; + case CIPHER_DES3_CBC: + sessionSetupData.cipherSetupData.cipherAlgorithm = + CPA_CY_SYM_CIPHER_3DES_CBC; + sessionSetupData.cipherSetupData.cipherKeyLenInBytes = + KEY_SIZE_192_IN_BYTES; + break; + case CIPHER_AES: + sessionSetupData.cipherSetupData.cipherAlgorithm = + CPA_CY_SYM_CIPHER_AES_ECB; + sessionSetupData.cipherSetupData.cipherKeyLenInBytes = + KEY_SIZE_128_IN_BYTES; + break; + case CIPHER_AES_CBC_128: + sessionSetupData.cipherSetupData.cipherAlgorithm = + CPA_CY_SYM_CIPHER_AES_CBC; + sessionSetupData.cipherSetupData.cipherKeyLenInBytes = + KEY_SIZE_128_IN_BYTES; + break; + default: + printf("Crypto: Undefined Cipher specified\n"); + break; + } + /* Set the cipher direction */ + if (isCrypto) { + sessionSetupData.cipherSetupData.cipherDirection = crypto_direction; + sessionSetupData.cipherSetupData.pCipherKey = + g_crypto_hash_keys.cipher_key; + sessionSetupData.symOperation = CPA_CY_SYM_OP_CIPHER; + } + + /* Setup Hash common fields */ + switch (pkt_hash_alg) { + case NO_HASH: + isHmac = CPA_FALSE; + break; + case HASH_AES_XCBC: + sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_XCBC; + sessionSetupData.hashSetupData.digestResultLenInBytes = + AES_XCBC_DIGEST_LENGTH_IN_BYTES; + break; + case HASH_AES_XCBC_96: + sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_XCBC; + sessionSetupData.hashSetupData.digestResultLenInBytes = + AES_XCBC_96_DIGEST_LENGTH_IN_BYTES; + break; + case HASH_MD5: + sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5; + sessionSetupData.hashSetupData.digestResultLenInBytes = + MD5_DIGEST_LENGTH_IN_BYTES; + break; + case HASH_SHA1: + sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1; + sessionSetupData.hashSetupData.digestResultLenInBytes = + SHA1_DIGEST_LENGTH_IN_BYTES; + break; + case HASH_SHA1_96: + sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1; + sessionSetupData.hashSetupData.digestResultLenInBytes = + SHA1_96_DIGEST_LENGTH_IN_BYTES; + break; + case HASH_SHA224: + sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA224; + sessionSetupData.hashSetupData.digestResultLenInBytes = + SHA224_DIGEST_LENGTH_IN_BYTES; + break; + case HASH_SHA256: + 
sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA256; + sessionSetupData.hashSetupData.digestResultLenInBytes = + SHA256_DIGEST_LENGTH_IN_BYTES; + break; + case HASH_SHA384: + sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA384; + sessionSetupData.hashSetupData.digestResultLenInBytes = + SHA384_DIGEST_LENGTH_IN_BYTES; + break; + case HASH_SHA512: + sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA512; + sessionSetupData.hashSetupData.digestResultLenInBytes = + SHA512_DIGEST_LENGTH_IN_BYTES; + break; + default: + printf("Crypto: Undefined Hash specified\n"); + break; + } + if (isHmac) { + sessionSetupData.hashSetupData.hashMode = hashMode; + sessionSetupData.symOperation = CPA_CY_SYM_OP_HASH; + /* If using authenticated hash setup key lengths */ + if (CPA_CY_SYM_HASH_MODE_AUTH == hashMode) { + /* Use a common max length key */ + sessionSetupData.hashSetupData.authModeSetupData.authKey = + g_crypto_hash_keys.hash_key; + switch (pkt_hash_alg) { + case HASH_AES_XCBC: + case HASH_AES_XCBC_96: + sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes = + AES_XCBC_AUTH_KEY_LENGTH_IN_BYTES; + break; + case HASH_MD5: + sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes = + SHA1_AUTH_KEY_LENGTH_IN_BYTES; + break; + case HASH_SHA1: + case HASH_SHA1_96: + sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes = + SHA1_AUTH_KEY_LENGTH_IN_BYTES; + break; + case HASH_SHA224: + sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes = + SHA224_AUTH_KEY_LENGTH_IN_BYTES; + break; + case HASH_SHA256: + sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes = + SHA256_AUTH_KEY_LENGTH_IN_BYTES; + break; + case HASH_SHA384: + sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes = + SHA384_AUTH_KEY_LENGTH_IN_BYTES; + break; + case HASH_SHA512: + sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes = + SHA512_AUTH_KEY_LENGTH_IN_BYTES; + break; + default: + printf("Crypto: Undefined Hash specified\n"); + return CPA_STATUS_FAIL; + } + } + } + + /* Only high priority supported */ + sessionSetupData.sessionPriority = CPA_CY_PRIORITY_HIGH; + + /* If chaining algorithms */ + if (isCrypto && isHmac) { + sessionSetupData.symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING; + /* @assumption Alg Chain order is cipher then hash for encrypt + * and hash then cipher then has for decrypt*/ + if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == crypto_direction) { + sessionSetupData.algChainOrder = + CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; + } else { + sessionSetupData.algChainOrder = + CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; + } + } + if (!isCrypto && !isHmac) { + *ppSessionCtx = NULL; + return CPA_STATUS_SUCCESS; + } + + /* Get the session context size based on the crypto and/or hash operations*/ + status = cpaCySymDpSessionCtxGetSize(cyInstanceHandle, &sessionSetupData, + &sessionCtxSizeInBytes); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: cpaCySymDpSessionCtxGetSize error, status: %"PRId32"\n", + status); + return CPA_STATUS_FAIL; + } + + *ppSessionCtx = alloc_memzone_region(sessionCtxSizeInBytes, lcore_id); + if (NULL == *ppSessionCtx) { + printf("Crypto: Failed to allocate memory for Session Context\n"); + return CPA_STATUS_FAIL; + } + + status = cpaCySymDpInitSession(cyInstanceHandle, &sessionSetupData, + CPA_TRUE,CPA_FALSE, *ppSessionCtx); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: cpaCySymDpInitSession failed with status %"PRId32"\n", status); + return 
CPA_STATUS_FAIL; + } + return CPA_STATUS_SUCCESS; +} + +static CpaStatus +initSessionDataTables(struct qa_core_conf *qaCoreConf,uint32_t lcore_id) +{ + Cpa32U i = 0, j = 0; + CpaStatus status = CPA_STATUS_FAIL; + for (i = 0; i < NUM_CRYPTO; i++) { + for (j = 0; j < NUM_HMAC; j++) { + status = initCySymSession(i, j, CPA_CY_SYM_HASH_MODE_AUTH, + CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT, + &qaCoreConf->encryptSessionHandleTbl[i][j], + qaCoreConf->instanceHandle, + lcore_id); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: Failed to initialize Encrypt sessions\n"); + return CPA_STATUS_FAIL; + } + status = initCySymSession(i, j, CPA_CY_SYM_HASH_MODE_AUTH, + CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT, + &qaCoreConf->decryptSessionHandleTbl[i][j], + qaCoreConf->instanceHandle, + lcore_id); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: Failed to initialize Decrypt sessions\n"); + return CPA_STATUS_FAIL; + } + } + } + return CPA_STATUS_SUCCESS; +} + +int +crypto_init(void) +{ + if (CPA_STATUS_SUCCESS != icp_sal_userStart("SSL")) { + printf("Crypto: Could not start sal for user space\n"); + return CPA_STATUS_FAIL; + } + printf("Crypto: icp_sal_userStart(\"SSL\")\n"); + return 0; +} + +/* + * Per core initialisation + */ +int +per_core_crypto_init(uint32_t lcore_id) +{ + CpaStatus status = CPA_STATUS_FAIL; + char memzone_name[RTE_MEMZONE_NAMESIZE]; + + int socketID = rte_lcore_to_socket_id(lcore_id); + + /* Allocate software ring for response messages. */ + + qaCoreConf[lcore_id].callbackQueue.head = 0; + qaCoreConf[lcore_id].callbackQueue.tail = 0; + qaCoreConf[lcore_id].callbackQueue.numEntries = 0; + qaCoreConf[lcore_id].kickFreq = 0; + qaCoreConf[lcore_id].qaOutstandingRequests = 0; + qaCoreConf[lcore_id].numResponseAttempts = 0; + + /* Initialise and reserve lcore memzone for virt2phys translation */ + rte_snprintf(memzone_name, + RTE_MEMZONE_NAMESIZE, + "lcore_%u", + lcore_id); + + qaCoreConf[lcore_id].lcoreMemzone.memzone = rte_memzone_reserve( + memzone_name, + LCORE_MEMZONE_SIZE, + socketID, + 0); + if (NULL == qaCoreConf[lcore_id].lcoreMemzone.memzone) { + printf("Crypto: Error allocating memzone on lcore %u\n",lcore_id); + return -1; + } + qaCoreConf[lcore_id].lcoreMemzone.next_free_address = + qaCoreConf[lcore_id].lcoreMemzone.memzone->addr; + + qaCoreConf[lcore_id].pPacketIV = alloc_memzone_region(IV_LENGTH_16_BYTES, + lcore_id); + + if (NULL == qaCoreConf[lcore_id].pPacketIV ) { + printf("Crypto: Failed to allocate memory for Initialization Vector\n"); + return -1; + } + + memcpy(qaCoreConf[lcore_id].pPacketIV, &g_crypto_hash_keys.iv, + IV_LENGTH_16_BYTES); + + qaCoreConf[lcore_id].packetIVPhy = qa_v2p(qaCoreConf[lcore_id].pPacketIV); + if (0 == qaCoreConf[lcore_id].packetIVPhy) { + printf("Crypto: Invalid physical address for Initialization Vector\n"); + return -1; + } + + /* + * Obtain the instance handle that is mapped to the current lcore. + * This can fail if an instance is not mapped to a bank which has been + * affinitized to the current lcore. 
+ */ + status = get_crypto_instance_on_core(&(qaCoreConf[lcore_id].instanceHandle), + lcore_id); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: get_crypto_instance_on_core failed with status: %"PRId32"\n", + status); + return -1; + } + + status = cpaCySymDpRegCbFunc(qaCoreConf[lcore_id].instanceHandle, + (CpaCySymDpCbFunc) qa_crypto_callback); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: cpaCySymDpRegCbFunc failed with status: %"PRId32"\n", status); + return -1; + } + + /* + * Set the address translation callback for virtual to physcial address + * mapping. This will be called by the QAT driver during initialisation only. + */ + status = cpaCySetAddressTranslation(qaCoreConf[lcore_id].instanceHandle, + (CpaVirtualToPhysical) qa_v2p); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: cpaCySetAddressTranslation failed with status: %"PRId32"\n", + status); + return -1; + } + + status = initSessionDataTables(&qaCoreConf[lcore_id],lcore_id); + if (CPA_STATUS_SUCCESS != status) { + printf("Crypto: Failed to allocate all session tables."); + return -1; + } + return 0; +} + +static CpaStatus +enqueueOp(CpaCySymDpOpData *opData, uint32_t lcore_id) +{ + + CpaStatus status; + + /* + * Assumption is there is no requirement to do load balancing between + * acceleration units - that is one acceleration unit is tied to a core. + */ + opData->instanceHandle = qaCoreConf[lcore_id].instanceHandle; + + if ((++qaCoreConf[lcore_id].kickFreq) % CRYPTO_BURST_TX == 0) { + status = cpaCySymDpEnqueueOp(opData, CPA_TRUE); + } else { + status = cpaCySymDpEnqueueOp(opData, CPA_FALSE); + } + + qaCoreConf[lcore_id].qaOutstandingRequests++; + + return status; +} + +void +crypto_flush_tx_queue(uint32_t lcore_id) +{ + + cpaCySymDpPerformOpNow(qaCoreConf[lcore_id].instanceHandle); +} + +enum crypto_result +crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h) +{ + CpaCySymDpOpData *opData = + (CpaCySymDpOpData *) ((char *) (rte_buff->pkt.data) + + CRYPTO_OFFSET_TO_OPDATA); + uint32_t lcore_id; + + lcore_id = rte_lcore_id(); + + bzero(opData, sizeof(CpaCySymDpOpData)); + + opData->srcBuffer = opData->dstBuffer = PACKET_DATA_START_PHYS(rte_buff); + opData->srcBufferLen = opData->dstBufferLen = rte_buff->pkt.data_len; + opData->sessionCtx = qaCoreConf[lcore_id].encryptSessionHandleTbl[c][h]; + opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff) + + CRYPTO_OFFSET_TO_OPDATA; + opData->pCallbackTag = rte_buff; + + /* if no crypto or hash operations are specified return fail */ + if (NO_CIPHER == c && NO_HASH == h) + return CRYPTO_RESULT_FAIL; + + if (NO_CIPHER != c) { + opData->pIv = qaCoreConf[lcore_id].pPacketIV; + opData->iv = qaCoreConf[lcore_id].packetIVPhy; + + if (CIPHER_AES_CBC_128 == c) + opData->ivLenInBytes = IV_LENGTH_16_BYTES; + else + opData->ivLenInBytes = IV_LENGTH_8_BYTES; + + opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET; + opData->messageLenToCipherInBytes = rte_buff->pkt.data_len + - CRYPTO_START_OFFSET; + /* + * Work around for padding, message length has to be a multiple of + * block size. + */ + opData->messageLenToCipherInBytes -= opData->messageLenToCipherInBytes + % CIPHER_BLOCK_DEFAULT_SIZE; + } + + if (NO_HASH != h) { + + opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET; + opData->messageLenToHashInBytes = rte_buff->pkt.data_len + - HASH_START_OFFSET; + /* + * Work around for padding, message length has to be a multiple of block + * size. 
+ */ + opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes + % HASH_BLOCK_DEFAULT_SIZE; + + /* + * Assumption: Ok ignore the passed digest pointer and place HMAC at end + * of packet. + */ + opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len; + } + + if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) { + /* + * Failed to place a packet on the hardware queue. + * Most likely because the QA hardware is busy. + */ + return CRYPTO_RESULT_FAIL; + } + return CRYPTO_RESULT_IN_PROGRESS; +} + +enum crypto_result +crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h) +{ + + CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->pkt.data) + + CRYPTO_OFFSET_TO_OPDATA); + uint32_t lcore_id; + + lcore_id = rte_lcore_id(); + + bzero(opData, sizeof(CpaCySymDpOpData)); + + opData->dstBuffer = opData->srcBuffer = PACKET_DATA_START_PHYS(rte_buff); + opData->dstBufferLen = opData->srcBufferLen = rte_buff->pkt.data_len; + opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff) + + CRYPTO_OFFSET_TO_OPDATA; + opData->sessionCtx = qaCoreConf[lcore_id].decryptSessionHandleTbl[c][h]; + opData->pCallbackTag = rte_buff; + + /* if no crypto or hmac operations are specified return fail */ + if (NO_CIPHER == c && NO_HASH == h) + return CRYPTO_RESULT_FAIL; + + if (NO_CIPHER != c) { + opData->pIv = qaCoreConf[lcore_id].pPacketIV; + opData->iv = qaCoreConf[lcore_id].packetIVPhy; + + if (CIPHER_AES_CBC_128 == c) + opData->ivLenInBytes = IV_LENGTH_16_BYTES; + else + opData->ivLenInBytes = IV_LENGTH_8_BYTES; + + opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET; + opData->messageLenToCipherInBytes = rte_buff->pkt.data_len + - CRYPTO_START_OFFSET; + + /* + * Work around for padding, message length has to be a multiple of block + * size. + */ + opData->messageLenToCipherInBytes -= opData->messageLenToCipherInBytes + % CIPHER_BLOCK_DEFAULT_SIZE; + } + if (NO_HASH != h) { + opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET; + opData->messageLenToHashInBytes = rte_buff->pkt.data_len + - HASH_START_OFFSET; + /* + * Work around for padding, message length has to be a multiple of block + * size. + */ + opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes + % HASH_BLOCK_DEFAULT_SIZE; + opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len; + } + + if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) { + /* + * Failed to place a packet on the hardware queue. + * Most likely because the QA hardware is busy. 
+ */ + return CRYPTO_RESULT_FAIL; + } + return CRYPTO_RESULT_IN_PROGRESS; +} + +void * +crypto_get_next_response(void) +{ + uint32_t lcore_id; + lcore_id = rte_lcore_id(); + struct qa_callbackQueue *callbackQ = &(qaCoreConf[lcore_id].callbackQueue); + void *entry = NULL; + + if (callbackQ->numEntries) { + entry = callbackQ->qaCallbackRing[callbackQ->tail]; + callbackQ->tail++; + callbackQ->numEntries--; + } + + /* If there are no outstanding requests no need to poll, return entry */ + if (qaCoreConf[lcore_id].qaOutstandingRequests == 0) + return entry; + + if (callbackQ->numEntries < CRYPTO_QUEUED_RESP_POLL_THRESHOLD + && qaCoreConf[lcore_id].numResponseAttempts++ + % GET_NEXT_RESPONSE_FREQ == 0) { + /* + * Only poll the hardware when there is less than + * CRYPTO_QUEUED_RESP_POLL_THRESHOLD elements in the software queue + */ + icp_sal_CyPollDpInstance(qaCoreConf[lcore_id].instanceHandle, + CRYPTO_MAX_RESPONSE_QUOTA); + } + return entry; +} diff --git a/examples/dpdk_qat/crypto.h b/examples/dpdk_qat/crypto.h new file mode 100644 index 0000000000..13a06ab3ad --- /dev/null +++ b/examples/dpdk_qat/crypto.h @@ -0,0 +1,88 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef CRYPTO_H_ +#define CRYPTO_H_ + +/* Pass Labels/Values to crypto units */ +enum cipher_alg { + /* Option to not do any cryptography */ + NO_CIPHER, + CIPHER_DES, + CIPHER_DES_CBC, + CIPHER_DES3, + CIPHER_DES3_CBC, + CIPHER_AES, + CIPHER_AES_CBC_128, +}; + +enum hash_alg { + /* Option to not do any hash */ + NO_HASH, + HASH_MD5, + HASH_SHA1, + HASH_SHA1_96, + HASH_SHA224, + HASH_SHA256, + HASH_SHA384, + HASH_SHA512, + HASH_AES_XCBC, + HASH_AES_XCBC_96 +}; + +/* Return value from crypto_{encrypt/decrypt} */ +enum crypto_result { + /* Packet was successfully put into crypto queue */ + CRYPTO_RESULT_IN_PROGRESS, + /* Cryptography has failed in some way */ + CRYPTO_RESULT_FAIL, +}; + +extern enum crypto_result crypto_encrypt(struct rte_mbuf *pkt, enum cipher_alg c, + enum hash_alg h); +extern enum crypto_result crypto_decrypt(struct rte_mbuf *pkt, enum cipher_alg c, + enum hash_alg h); + +extern int crypto_init(void); + +extern int per_core_crypto_init(uint32_t lcore_id); + +extern void crypto_exit(void); + +extern void *crypto_get_next_response(void); + +extern void crypto_flush_tx_queue(uint32_t lcore_id); + +#endif /* CRYPTO_H_ */ diff --git a/examples/dpdk_qat/main.c b/examples/dpdk_qat/main.c new file mode 100644 index 0000000000..2a4a0ec1c9 --- /dev/null +++ b/examples/dpdk_qat/main.c @@ -0,0 +1,857 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" +#include "crypto.h" + +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define NB_MBUF (32 * 1024) + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ +#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ +#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */ + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */ +#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ +#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */ + +#define MAX_PKT_BURST 32 +#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */ + +#define SOCKET0 0 + +#define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF +#define TSC_COUNT_LIMIT 1000 + +#define ACTION_ENCRYPT 1 +#define ACTION_DECRYPT 2 + +/* + * Configurable number of RX/TX ring descriptors + */ +#define RTE_TEST_RX_DESC_DEFAULT 128 +#define RTE_TEST_TX_DESC_DEFAULT 512 +static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; +static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; + +/* ethernet addresses of ports */ +static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS]; + +/* mask of enabled ports */ +static unsigned enabled_port_mask = 0; +static int promiscuous_on = 1; /**< Ports set in promiscuous mode on by default. 
*/ + +struct mbuf_table { + uint16_t len; + struct rte_mbuf *m_table[MAX_PKT_BURST]; +}; + +struct lcore_rx_queue { + uint8_t port_id; + uint8_t queue_id; +}; + +#define MAX_RX_QUEUE_PER_LCORE 16 + +#define MAX_LCORE_PARAMS 1024 +struct lcore_params { + uint8_t port_id; + uint8_t queue_id; + uint8_t lcore_id; +}; + +static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS]; +static struct lcore_params lcore_params_array_default[] = { + {0, 0, 2}, + {0, 1, 2}, + {0, 2, 2}, + {1, 0, 2}, + {1, 1, 2}, + {1, 2, 2}, + {2, 0, 2}, + {3, 0, 3}, + {3, 1, 3}, +}; + +static struct lcore_params * lcore_params = lcore_params_array_default; +static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) / + sizeof(lcore_params_array_default[0]); + +static struct rte_eth_conf port_conf = { + .rxmode = { + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 1, /**< IP checksum offload enabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 0, /**< CRC stripped by hardware */ + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IPV4, + }, + }, + .txmode = { + }, +}; + +static const struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, + }, +}; + +static const struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +static struct rte_mempool * pktmbuf_pool[RTE_MAX_NUMA_NODES]; + +struct lcore_conf { + uint64_t tsc; + uint64_t tsc_count; + uint32_t tx_mask; + uint16_t n_rx_queue; + uint16_t rx_queue_list_pos; + struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; + struct mbuf_table rx_mbuf; + uint32_t rx_mbuf_pos; + uint32_t rx_curr_queue; + struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; +} __rte_cache_aligned; + +static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + +static inline struct rte_mbuf * +nic_rx_get_packet(struct lcore_conf *qconf) +{ + struct rte_mbuf *pkt; + + if (unlikely(qconf->n_rx_queue == 0)) + return NULL; + + /* Look for the next queue with packets; return if none */ + if (unlikely(qconf->rx_mbuf_pos == qconf->rx_mbuf.len)) { + uint32_t i; + + qconf->rx_mbuf_pos = 0; + for (i = 0; i < qconf->n_rx_queue; i++) { + qconf->rx_mbuf.len = rte_eth_rx_burst( + qconf->rx_queue_list[qconf->rx_curr_queue].port_id, + qconf->rx_queue_list[qconf->rx_curr_queue].queue_id, + qconf->rx_mbuf.m_table, MAX_PKT_BURST); + + qconf->rx_curr_queue++; + if (unlikely(qconf->rx_curr_queue == qconf->n_rx_queue)) + qconf->rx_curr_queue = 0; + if (likely(qconf->rx_mbuf.len > 0)) + break; + } + if (unlikely(i == qconf->n_rx_queue)) + return NULL; + } + + /* Get the next packet from the current queue; if last packet, go to next queue */ + pkt = qconf->rx_mbuf.m_table[qconf->rx_mbuf_pos]; + qconf->rx_mbuf_pos++; + + return pkt; +} + +static inline void +nic_tx_flush_queues(struct lcore_conf *qconf) +{ + uint8_t portid; + + for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) { + struct rte_mbuf **m_table = NULL; + uint16_t queueid, len; + uint32_t n, i; + + if (likely((qconf->tx_mask & (1 << portid)) == 0)) + continue; + + len = qconf->tx_mbufs[portid].len; + if (likely(len == 0)) + continue; + + queueid = qconf->tx_queue_id[portid]; + m_table = 
qconf->tx_mbufs[portid].m_table; + + n = rte_eth_tx_burst(portid, queueid, m_table, len); + for (i = n; i < len; i++){ + rte_pktmbuf_free(m_table[i]); + } + + qconf->tx_mbufs[portid].len = 0; + } + + qconf->tx_mask = TX_QUEUE_FLUSH_MASK; +} + +static inline void +nic_tx_send_packet(struct rte_mbuf *pkt, uint8_t port) +{ + struct lcore_conf *qconf; + uint32_t lcoreid; + uint16_t len; + + if (unlikely(pkt == NULL)) { + return; + } + + lcoreid = rte_lcore_id(); + qconf = &lcore_conf[lcoreid]; + + len = qconf->tx_mbufs[port].len; + qconf->tx_mbufs[port].m_table[len] = pkt; + len++; + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + uint32_t n, i; + uint16_t queueid; + + queueid = qconf->tx_queue_id[port]; + n = rte_eth_tx_burst(port, queueid, qconf->tx_mbufs[port].m_table, MAX_PKT_BURST); + for (i = n; i < MAX_PKT_BURST; i++){ + rte_pktmbuf_free(qconf->tx_mbufs[port].m_table[i]); + } + + qconf->tx_mask &= ~(1 << port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; +} + +static inline uint8_t +get_output_port(uint8_t input_port) +{ + return (uint8_t)(input_port ^ 1); +} + +/* main processing loop */ +static __attribute__((noreturn)) int +main_loop(__attribute__((unused)) void *dummy) +{ + uint32_t lcoreid; + struct lcore_conf *qconf; + + lcoreid = rte_lcore_id(); + qconf = &lcore_conf[lcoreid]; + + printf("Thread %u starting...\n", lcoreid); + + for (;;) { + struct rte_mbuf *pkt; + uint32_t pkt_from_nic_rx = 0; + uint8_t port; + + /* Flush TX queues */ + qconf->tsc_count++; + if (unlikely(qconf->tsc_count == TSC_COUNT_LIMIT)) { + uint64_t tsc, diff_tsc; + + tsc = rte_rdtsc(); + + diff_tsc = tsc - qconf->tsc; + if (unlikely(diff_tsc > BURST_TX_DRAIN)) { + nic_tx_flush_queues(qconf); + crypto_flush_tx_queue(lcoreid); + qconf->tsc = tsc; + } + + qconf->tsc_count = 0; + } + + /* + * Check the Intel QuickAssist queues first + * + ***/ + pkt = (struct rte_mbuf *) crypto_get_next_response(); + if (pkt == NULL) { + pkt = nic_rx_get_packet(qconf); + pkt_from_nic_rx = 1; + } + if (pkt == NULL) + continue; + /* Send packet to either QAT encrypt, QAT decrypt or NIC TX */ + if (pkt_from_nic_rx) { + struct ipv4_hdr *ip = (struct ipv4_hdr *) (rte_pktmbuf_mtod(pkt, unsigned char *) + + sizeof(struct ether_hdr)); + if (ip->src_addr & rte_cpu_to_be_32(ACTION_ENCRYPT)) { + if (CRYPTO_RESULT_FAIL == crypto_encrypt(pkt, + (enum cipher_alg)((ip->src_addr >> 16) & 0xFF), + (enum hash_alg)((ip->src_addr >> 8) & 0xFF))) + rte_pktmbuf_free(pkt); + continue; + } + + if (ip->src_addr & rte_cpu_to_be_32(ACTION_DECRYPT)) { + if(CRYPTO_RESULT_FAIL == crypto_decrypt(pkt, + (enum cipher_alg)((ip->src_addr >> 16) & 0xFF), + (enum hash_alg)((ip->src_addr >> 8) & 0xFF))) + rte_pktmbuf_free(pkt); + continue; + } + } + + port = get_output_port(pkt->pkt.in_port); + + /* Transmit the packet */ + nic_tx_send_packet(pkt, port); + } +} + +static inline unsigned +get_port_max_rx_queues(uint8_t port_id) +{ + struct rte_eth_dev_info dev_info; + + rte_eth_dev_info_get(port_id, &dev_info); + return dev_info.max_rx_queues; +} + +static inline unsigned +get_port_max_tx_queues(uint8_t port_id) +{ + struct rte_eth_dev_info dev_info; + + rte_eth_dev_info_get(port_id, &dev_info); + return dev_info.max_tx_queues; +} + +static int +check_lcore_params(void) +{ + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + if (lcore_params[i].queue_id >= get_port_max_rx_queues(lcore_params[i].port_id)) { + printf("invalid queue number: %hhu\n", lcore_params[i].queue_id); + return -1; + } + if 
(!rte_lcore_is_enabled(lcore_params[i].lcore_id)) { + printf("error: lcore %hhu is not enabled in lcore mask\n", + lcore_params[i].lcore_id); + return -1; + } + } + return 0; +} + +static int +check_port_config(const unsigned nb_ports) +{ + unsigned portid; + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + portid = lcore_params[i].port_id; + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("port %u is not enabled in port mask\n", portid); + return -1; + } + if (portid >= nb_ports) { + printf("port %u is not present on the board\n", portid); + return -1; + } + } + return 0; +} + +static uint8_t +get_port_n_rx_queues(const uint8_t port) +{ + int queue = -1; + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue) + queue = lcore_params[i].queue_id; + } + return (uint8_t)(++queue); +} + +static int +init_lcore_rx_queues(void) +{ + uint16_t i, nb_rx_queue; + uint8_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + nb_rx_queue = lcore_conf[lcore].n_rx_queue; + if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { + printf("error: too many queues (%u) for lcore: %u\n", + (unsigned)nb_rx_queue + 1, (unsigned)lcore); + return -1; + } + lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = + lcore_params[i].port_id; + lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id = + lcore_params[i].queue_id; + lcore_conf[lcore].n_rx_queue++; + } + return 0; +} + +/* display usage */ +static void +print_usage(const char *prgname) +{ + printf ("%s [EAL options] -- -p PORTMASK [--no-promisc]" + " [--config (port,queue,lcore)[,(port,queue,lcore]]\n" + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " --no-promisc: disable promiscuous mode (default is ON)\n" + " --config (port,queue,lcore): rx queues configuration\n", + prgname); +} + +static unsigned +parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return 0; + + return pm; +} + +static int +parse_config(const char *q_arg) +{ + char s[256]; + const char *p, *p_end = q_arg; + char *end; + enum fieldnames { + FLD_PORT = 0, + FLD_QUEUE, + FLD_LCORE, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + int i; + unsigned size; + + nb_lcore_params = 0; + + while ((p = strchr(p_end,'(')) != NULL) { + if (nb_lcore_params >= MAX_LCORE_PARAMS) { + printf("exceeded max number of lcore params: %hu\n", + nb_lcore_params); + return -1; + } + ++p; + if((p_end = strchr(p,')')) == NULL) + return -1; + + size = p_end - p; + if(size >= sizeof(s)) + return -1; + + rte_snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++) { + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) + return -1; + } + lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = lcore_params_array; + return 0; +} + +/* Parse the argument given in the command line of the application */ +static int +parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + char *prgname = 
argv[0]; + static struct option lgopts[] = { + {"config", 1, 0, 0}, + {"no-promisc", 0, 0, 0}, + {NULL, 0, 0, 0} + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* portmask */ + case 'p': + enabled_port_mask = parse_portmask(optarg); + if (enabled_port_mask == 0) { + printf("invalid portmask\n"); + print_usage(prgname); + return -1; + } + break; + + /* long options */ + case 0: + if (strcmp(lgopts[option_index].name, "config") == 0) { + ret = parse_config(optarg); + if (ret) { + printf("invalid config\n"); + print_usage(prgname); + return -1; + } + } + if (strcmp(lgopts[option_index].name, "no-promisc") == 0) { + printf("Promiscuous mode disabled\n"); + promiscuous_on = 0; + } + break; + default: + print_usage(prgname); + return -1; + } + } + + if (enabled_port_mask == 0) { + printf("portmask not specified\n"); + print_usage(prgname); + return -1; + } + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + return ret; +} + +static void +print_ethaddr(const char *name, const struct ether_addr *eth_addr) +{ + printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name, + eth_addr->addr_bytes[0], + eth_addr->addr_bytes[1], + eth_addr->addr_bytes[2], + eth_addr->addr_bytes[3], + eth_addr->addr_bytes[4], + eth_addr->addr_bytes[5]); +} + +static int +init_mem(void) +{ + const unsigned flags = 0; + int socketid; + unsigned lcoreid; + char s[64]; + + RTE_LCORE_FOREACH(lcoreid) { + socketid = rte_lcore_to_socket_id(lcoreid); + if (socketid >= RTE_MAX_NUMA_NODES) { + printf("Socket %d of lcore %u is out of range %d\n", + socketid, lcoreid, RTE_MAX_NUMA_NODES); + return -1; + } + if (pktmbuf_pool[socketid] == NULL) { + rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); + pktmbuf_pool[socketid] = + rte_mempool_create(s, NB_MBUF, MBUF_SIZE, 32, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + socketid, flags); + if (pktmbuf_pool[socketid] == NULL) { + printf("Cannot init mbuf pool on socket %d\n", socketid); + return -1; + } + printf("Allocated mbuf pool on socket %d\n", socketid); + } + } + return 0; +} + +int +MAIN(int argc, char **argv) +{ + struct lcore_conf *qconf; + struct rte_eth_link link; + int ret; + unsigned nb_ports; + uint16_t queueid; + unsigned lcoreid; + uint32_t nb_tx_queue; + uint8_t portid, nb_rx_queue, queue, socketid; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + return -1; + argc -= ret; + argv += ret; + + /* parse application arguments (after the EAL ones) */ + ret = parse_args(argc, argv); + if (ret < 0) + return -1; + + /* init driver */ +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) + rte_panic("Cannot init igb pmd\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) + rte_panic("Cannot init ixgbe pmd\n"); +#endif + + if (rte_eal_pci_probe() < 0) + rte_panic("Cannot probe PCI\n"); + + if (check_lcore_params() < 0) + rte_panic("check_lcore_params failed\n"); + + ret = init_lcore_rx_queues(); + if (ret < 0) + return -1; + + ret = init_mem(); + if (ret < 0) + return -1; + + nb_ports = rte_eth_dev_count(); + if (nb_ports > RTE_MAX_ETHPORTS) + nb_ports = RTE_MAX_ETHPORTS; + + if (check_port_config(nb_ports) < 0) + rte_panic("check_port_config failed\n"); + + /* initialize all ports */ + for (portid = 0; portid < nb_ports; portid++) { + /* skip ports that are not enabled */ + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("\nSkipping 
disabled port %d\n", portid); + continue; + } + + /* init port */ + printf("Initializing port %d ... ", portid ); + fflush(stdout); + + nb_rx_queue = get_port_n_rx_queues(portid); + if (nb_rx_queue > get_port_max_rx_queues(portid)) + rte_panic("Number of rx queues %d exceeds max number of rx queues %u" + " for port %d\n", nb_rx_queue, get_port_max_rx_queues(portid), + portid); + nb_tx_queue = rte_lcore_count(); + if (nb_tx_queue > get_port_max_tx_queues(portid)) + rte_panic("Number of lcores %u exceeds max number of tx queues %u" + " for port %d\n", nb_tx_queue, get_port_max_tx_queues(portid), + portid); + printf("Creating queues: nb_rxq=%d nb_txq=%u... ", + nb_rx_queue, (unsigned)nb_tx_queue ); + ret = rte_eth_dev_configure(portid, nb_rx_queue, + (uint16_t)nb_tx_queue, &port_conf); + if (ret < 0) + rte_panic("Cannot configure device: err=%d, port=%d\n", + ret, portid); + + rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); + print_ethaddr(" Address:", &ports_eth_addr[portid]); + printf(", "); + + /* init one TX queue per couple (lcore,port) */ + queueid = 0; + RTE_LCORE_FOREACH(lcoreid) { + socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid); + printf("txq=%u,%d,%d ", lcoreid, queueid, socketid); + fflush(stdout); + ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, + socketid, &tx_conf); + if (ret < 0) + rte_panic("rte_eth_tx_queue_setup: err=%d, " + "port=%d\n", ret, portid); + + qconf = &lcore_conf[lcoreid]; + qconf->tx_queue_id[portid] = queueid; + queueid++; + } + printf("\n"); + } + + RTE_LCORE_FOREACH(lcoreid) { + qconf = &lcore_conf[lcoreid]; + printf("\nInitializing rx queues on lcore %u ... ", lcoreid ); + fflush(stdout); + /* init RX queues */ + for(queue = 0; queue < qconf->n_rx_queue; ++queue) { + portid = qconf->rx_queue_list[queue].port_id; + queueid = qconf->rx_queue_list[queue].queue_id; + socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid); + printf("rxq=%d,%d,%d ", portid, queueid, socketid); + fflush(stdout); + + ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, + socketid, &rx_conf, pktmbuf_pool[socketid]); + if (ret < 0) + rte_panic("rte_eth_rx_queue_setup: err=%d," + "port=%d\n", ret, portid); + } + } + + printf("\n"); + + /* start ports */ + for (portid = 0; portid < nb_ports; portid++) { + if ((enabled_port_mask & (1 << portid)) == 0) + continue; + /* Start device */ + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_panic("rte_eth_dev_start: err=%d, port=%d\n", + ret, portid); + + printf("done: Port %d ", portid); + + /* get link status */ + rte_eth_link_get(portid, &link); + if (link.link_status) + printf(" Link Up - speed %u Mbps - %s\n", + (unsigned) link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex\n")); + else + printf(" Link Down\n"); + /* + * If enabled, put device in promiscuous mode. + * This allows IO forwarding mode to forward packets + * to itself through 2 cross-connected ports of the + * target machine. 
+ */ + if (promiscuous_on) + rte_eth_promiscuous_enable(portid); + } + printf("Crypto: Initializing Crypto...\n"); + if (crypto_init() != 0) + return -1; + + RTE_LCORE_FOREACH(lcoreid) { + if (per_core_crypto_init(lcoreid) != 0) { + printf("Crypto: Cannot init lcore crypto on lcore %u\n", (unsigned)lcoreid); + return -1; + } + } + printf("Crypto: Initialization complete\n"); + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcoreid) { + if (rte_eal_wait_lcore(lcoreid) < 0) + return -1; + } + + return 0; +} diff --git a/examples/dpdk_qat/main.h b/examples/dpdk_qat/main.h new file mode 100644 index 0000000000..6027cb5455 --- /dev/null +++ b/examples/dpdk_qat/main.h @@ -0,0 +1,47 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/exception_path/482248_ExceptionPath_Sample_App_Guide_Rev1.1.pdf b/examples/exception_path/482248_ExceptionPath_Sample_App_Guide_Rev1.1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..aa646d9b250a0e7b18b2aff6a50b95de4824571a GIT binary patch literal 77734
zhpU<1zw2$6iJ_RUZo{S_6q)`teZ=HbYui($@zaiX^t&jdUIWp5+!#v6PHkMo=yHVF z(=RX>dWn^c%)GOQ?g5rL(kd;@fIuTHSfOdlt5(5ThZ3j)vC=)VW2b{ zb2rcmM`oqZ0E||1)4suPrw^tQ($YUb$ZZ^$e)J;6Ioo42)NB-TG7d3~cxDnQ_`Pic?kUt1;wQEg!aQf}2V*e~(~2=~ z`BiRF0kJ%vKuLtJDCCbMyh{nhM}N9rJuEHZxCb_q|YrI10iY`C*L%GbIIM-E(H( zFg0b*%fOvE7=d9k z*U41ZC?K1ynQlvXfIGx#!=$#;mT*yfAp)EQQZ70g>65T4+xy*qozw`U2X$+y6A_TU zHtlqQh`@?+7$0I8R8_GOqRG{8xE_=giFnSTBL$ZvSB-y&Zne>b)NkN+qF_XOcpgkh z?izw&aGOT3QX*TAzktRij41)y;!b=rXe-U1XgVQ!Ag92XQrQd5LhuJjCP zBE%5J)3h~@hq=@{?Y4?&#k6|Pyl*4+`ol7X$7^DWNNv|sa*ye%hFS8b;4$~NIRwZe zc@u0W{yAnv_c#zApewK!(NN(UNpcMwlZcR=PK{00DCDu+>?dbtrz;BA0;}=Us0W@Q z!N^c6O41=*O%X5<>)vJz(4-vgD56A{$uIalD<^Wj~VU*?0Dn^~fgh zJMp;(+j>ICQmxA2l{&&W1(4(8OA$=s@eagmsf&UXh1N@aqIy(nr_t(iJ?2@&_NS$e zmTR(867aG}WTU~Gh6y1@D8u#JD&RzrjpC6EvnJ$)W+Th_0OZFu;gcx+S&fAztTcr# z)@^I97k7k-L)EEgJ7L&c4m0EF&d@X`6yQzcDhpTXZ&OxQehCH1-koa7iY{Mv0uqhVwRpRLqW-!1Y%5v^X=E=015TwkuYfd&&x1S+;v5qCh} zaAbhVw)R$#@H?%daXRJ1X2jJkL7&iM^WqLqE9j%r2mebJ$zu@F(7@j@G-rYgPP&v4 z%pvWg=`^UU3jU2{&pFzr9+!}mp&lSga6QrC9(#QRzg{>MUTeNymSx7YJd52zLGGnb z;B%YLb+8Kpt{ZGgD;fh~f*($!xtwT>qNCo1XS^)YRIPrDgYSisr_-erWIL4pH5{fZ z&21n_eoR+3ljU@2ly$h%59 zUay*dV3rU#rV61)F#$S+s3j zJUE3hJw%0Qt6Wf~k<-k>BZ@qYZmx`bga4PaY162m5ep0{Jl5YR)y)YTp54? zl41limoh&{*8XW}T?J^$0CQH#JO> z=^>5Cqor~u9G}it`V&H)135OUmw*=}t4v!pH2-Fv>VU6TIpD{-Vm}9$jpW}3YaU%Jsrefaw z#Y~+1kiM!mBQ{l3N*KCDjOF>u2;ol9BgVAAa_%H{frbz+mT@)vz+_9B+hFo2i3!iE zLzJ05Wj2+Z;D@ZL2{bf(N(Hlbni8Y<)qhkKMi*(wX}v6@;k9-uIhF|YK$Mg~M@=%p znjX+VR`~YQoCnV}>>ggWE1f4>On8dnKo&*r2c$mb#j|97$AzmmX#!8i)`f?{+`StJ zn=L$6f^QE`xtV-11xn1%NGi%t+UI{53Bvsu`JaB&w4>-OpAd6vdQ)ra>;wIw9u`lv z3HN7UU!HNpM|ZR8i_8w~R6Z}CKS4_->nJj!Fut<8P@^PrriArY)#dst3 zQr6A#S96*!Ir+nF3~u9mjIQ)V@BCK zI#ae9C4EP8^CS2DYIf#f&;3^_1dYul;(hN9H~q#hHlxa03^`fCupCcCGL6|=T82{- zMW6A>4dwKIRboM0(|uXe}~rXApv4V|5Ua z+pgYcX7;!_4V-+amD--(xiVl$qWq#CD!X-P_13W+;jJ*z&V06RYD(IYI|>aS z#LQVwgHE>mZ3rnymNS5!@(ndQ^8TRV%T6Rya4PU}We(vqCx~qLdG@#xfu3q@Spt7Q zv$jjvb41J}oULhO7T~$K%bz21;LUXl=a^S$G9v7{aq&k(;dT4OFb1saXyJ#4DuPfm6)cx|8DyrGWk;alu>+223DJVbzC?Pu8LwG=HN_9@0$2r3J|SL-rK+o1 z9i6gS-WYPJ3P7_>ksXS(H63{J6LxKTNu^4eAIr(mVwZJ&d(yLSjvTT$TNqT>FqDp* zfvYu-qF5n)M92O_ggXo`k*6v7!)o3q*B`?XSu}6@O8_L7E_v^9pYrc$FdbmJ zHIqfX*&o?acYIY+`CvMT;ub0#=CMc$uc0++5};YweTh&zJi{|klKlYSNxS9nUSQYu z73e;8B5`>FeU0;f?&W1O>At8Th1@DUs3>}Zn)Pt}W@WP+S>i1cY-w|Ys`m*D22JWI z@uf~R%mjaJ!xN$lVl?G0#%!w0(rI|nVXZs1ihgZDwybuZ{`QmQp;BK(il=yKbksR@ z-Y@1xRMvx3sDsFR_jEZzUu9|1iRC3D5dh=vh=_=_ zp%Oo3bEetDXQQpWlKq|dNb;y3^_pH)PMUMo&xX9YTOdT_bSJ_i`n6{*M15`KdB8%yuWClIPlt(;pO7ocp(jg&BFzZ zb$B}HAl)sUaCmcLv*mn=#+QiO9Ep*4mFH1;tSzVo;SASPF4rMVIUCc*^5`!L1U?0{;0W3L@>CK2G$VbZ zlIp>&nGK#M4qmbWX;StSI+HhVaUas3lp_%L+Iyss#bX{*=s?G+`brTtlaeLo7muXY zW`QhL3t^QQACBOxf{HIfGNU1)pLdO_&@JG8tfzs#Y;t@LhnSqUlX=HlWHTIbRVqmT zq}7(TqDEl(SUoR@W552xeR|_<%-3b-Q10Uq=YG|kg$nRzG0x+?Ut@*?qqqqW+*`{j z#-O0Fn_DJgmug95&h?D(h|r9Rw;8$>10c#Os>#jn9tuAL|8`DPJqj2eJf(+h#H*6< zwD54f(Mgei7Y7Y%GH-86(t#V}KWFvHXcp13(2^xao=7mbx(gN=3~3X2So4&cSyRBe zNm&-FTlb(RLjG1iC?)|M3mtFTSa6@m^ zq3c<_erSa^2>?SAd|@j%$kR$E4|@iEx}#INJ(H@|(bpmN7pneeBU!r;Z*g}i{d2;e zH9tcwq-mJHKUT3IluT{pk>9&n?K|GQ66;k1Iq|>XUvuRKY`HypZpz| z|1Si3MsHnUus(I2|Bq=q$N&}ABoI+AkS;yw_W!j0|89Q!{|g&&v9SV_U5uPPewzYB zeu;|P+d2Qopp8-Fza!sSQ@wwqzNC+=_^AnS#?HIa_RVi`}gm}nrOtglcoHav43vm>G8<9d3fWWv4d~2g;h~jepsA|y%u2_OS@~gaA(8-wnzj)?tKp{POH={kuVhb~a$H0`f44 zED*ui1ZTT(PJ!U?gYyoM9;Y!*7ct7xJ^n%%)!Yd0n34pa6QjNJKf*Y}zQaEHvH_O7 z>c!fR0NSCg40c~zEq^x<>|pDfssOucuc)r18=%`jvU#YJHomT&jvu+MGMB5bnYo86 znl2Yr=d%!zvTEO%+Gpk8{}7?aDzT0gI_DVV$NUAT@HWTa{=w3BD=t-_j8i+a^_*xI zpmqRQ2EoBF#cW65=IDbL&-O}_DADFsj$}U8C 
zg7Yo08FJbAid=~n5q^41Fd5~*tK1yrdx!Oc+oTuC|3CJ=GAPn!Nf&o__rcxWb#QlU zXx!c1eQ8r1T#R2GdA747f#c=sDqq1q4E=+h(ytum2W|^slJ+{SORZ->uU|W3)7DeC{sLQzS{1tR%!qg-MPljuL_d$|eCF)nt_SpuOd`pF5DHxIAuq>sFmA zPWMt463Bz32tcEykompJBqJXXE$AW4C?7O{LO4sZVH?V?;kP}uOLPvN2crH2iVxv( z1`N07k|}WF9OVt@>GJk0Syz&sI|V%;j5PTzsC;F)c=5;Fi}xaCXsM>F8^liV7FCfg{?dY5?;if_u3}he>_1;w-F@b z4z}@xTz}DtiiH*Bo5!`jT&=;PU(;TM5-57d( z{4)Oh7_d_4fnC@h&Zbh8pdT4OZ^c`oJhW{%@c$AYGt8@8Op z*P3{c{LzZ=5uURp2cXR86)fS4Uiw4r<6DQwbkmvrpi8J3ky0m@hmGV&-io?i6J5*l zX7wMf({8P9L``eDHtkNF&~#^mx}u3C%w!SUb{L^C?Pl z?Wl0hFF%fXlH+`9E>JxLz3+mE6}bl}d-0w*7(*e>nWHgj<(5~?1i}{V`?Jl<|5WJK zl~nte*IpM@1w#O`u(Bm3SBP9!o(|WsU`VaYS z`|_`aL2fz~@?a`W_ga6C-tk|}${amS%!uqGAUW0R^$Jk)YtEXk=u&yIujjam(2EEv z7#mJeM784cepRs=i$@%~HfFP=Y%y5JxgG0}+L+V$2#|w*LzpH+-SUZoXjuw=pfaf=%sf%(q~e~} z0#OwChjE@YfwSl6_J?vW+c|w~_4hP>1O)=CQ>iUBbXngYML%{U40ZdkQwyJnNmYZ< zSHR3Bvlcqq1xpboaBhpy%5Vf66_yu5Rd2!&cYdrlrEG7BcA%t-mcck_>RSEC%@U&9!W1;eYF% zAN4F0g9lFuBgh>5Pb-DZeaM{L0srH!yx>cDn5T2!U6X zM10B!{mz2 z8D^noI9h-Bczw56lDw)P%?yBOTdG6o`S%~xvn>ZTr%U0#$bJ{vr|Q~oX;KGv2}!A zTDAb^oVYN7dqm=LytbPN(Zbx)#=D>ch-+DmEGt zyLR>h(O1ZXS#|?6Q(aXa0kw?76N}H0(Lo|$&eAbfls>^7X_j7Kk??PIxJAkZ=2WLZ(zVz&};#W#nZtgSTk18yH z$q4+COE8wtrkcA(VGvacSHOdkNvzuyNWVEbL?AgudtZnX0sE4xp+@1Fp3=ThJ%53* zx1-3#ySAj7QrC{2A!>w{Gl4H&A)lUwOfJNhv(8?p_pF)zQ*q1l8p&zZU4Sjn)-O`P zP-=KRiF~+${{+EX`VaT*bMOobUL=B6p~sD?M!DueYT+k;Y-k^BRjuMC;>Sy*Uh~5w zfhMQpwhM9&L+tClM?W?)bRtQDOIf4z$~Wdv7FDfFO|RDxkDfL#{`di4FTh1dABoE$ zMjz)Vi3k_&Enr93%ms>p@^>sLzv+WT*l`}40@H>{X+29d*%u<_oz3ej@#84ca5zog z2e%}7drmwXozem%D>dmn)$?T|1KS8`Ewog~MX4|e*_D%PXwa(7e2FE_oS6Vr9T_2T zojShak!2B8D+fSbgv&Am{<|^ID}}8V8^O3p&3`U z*~n2=xL~2?T3L8#5d^0d4i}_tAC@%2$-L6Tgfv*2&p?L-vteV=6pZVNuevAW)-7)L z$Sa%NbyY%j+Md7z;<>_bTAebNw)bA%T-A`V_iRaEZ<5lcpIlA7K}ZF56-9K@i!4az zr|8hGGkMF0Fd|d_g}A>$VmPcaj_O4 z6ZtEHL`z{8gthz~0qGfQ;-jpX8rCv32-Y<#x`D zfA@;f#NL+4!`Oj|g^`)*{}Ps$|9^|y+Wxl^rY8RqcW`yG`CD;Q6DFVy&=zRt;{4eH z%YT!1_%D6^Z|ZzzdODiR)iIk`Wgmb9J>e29U}fRr z`Yg|8!T>PiWH#Olkvv;yJ{v20h2L~HV6XU;zo9Pp;zq3ET zd6k^(O#{d6Q+kZFvZw3C1z`qgrHv<1g;NJ-R8v(fgj2VD- zpMiq=Ul9Qu-2W7paQ$aoB57&k0(4@MwE4^J1e(~J0{<1N@UV0LXFy`HQ?0X^xH*FA zd#X7_o`6O=g&DBJnOLoIwALuDNd!oPpB%m+JDH}?us#*K$A1~gzR3j@v2rgdR-uJu z>zjIbcX?9M*`CVneJNj_Ha@8uSf*uHSH>&3YlD+et@7pS%zfuvtkiUrQRn#(B*uQb zRfb9`5MkTDASsMX^TDdj=S;RV47iv=e9Ija@C3_OILO6lU%12 zXnWnliltk>5i)R@e_KvxkYlhU5jz^y=51mTjEpt<8w|c5B}cIB^XmPw%tC}*%kzOW z#5?uWAp^vZ26~JX4T)H;xOn}Uivcb~`AH=j`l^@`eWDKLe3U*B<9bV;=*7zNY-pj5 zz>_X8eQ8ii5hy8MjVUKLz9uaTfxBJd?#qnO(rVS2K?@F~n_7=5fRQDawfFYGe%4gDx4a)s!Jr{=rJZazjYSL5P$Yt2-ZtG z8epZ@n&sP*w2f%g+u&ELl+sV&u`3p955W4Tze2BfJXAWl8s`TW`D;CkhVIiMp4MxD z6eVF(RE0qchtCPX=qwt+$Bv19u^0NHCFWQUI2MnmdI%yqhYUZQEVQdUhex@OJGV^u z0u-J}lxlIL!my@irNB;(kr-;A%<{GeP%8i+W}xG%^VKwS?$vZCNg7035iEn8t;ul9 zis94Yr?3JKK5Q;fN)8a^odp-u*AEvKE9kNzP-m%w@;$DqSQzoc&*~N(aFLU>d2=!D z6G)qX-D>Bf`%tw<9?RrGt_As!K#p2T`-TDJDJL_6nU*athPjWt`shO}6Tnm1%GH~C zCNkBO9i^uPi10e`V6Dxi>A^>%(91DSH`u=%Cbbkqcx4Cj#9eViqK5d#8r9P0lwPtM z1krjV$f$CMfSi#F0})Q!256?^Dudwm`H503@=@hy(9Ka=`xELb>S4gz14$L&YS#G? 
zl`2C_(9xmHL80jdz0Crs=;TBr@CgsY2n4=(5tSigz^ihEwo1u@lHnJEs)5!|J9;FI z;Ms{Sl+k2YI>~J$fctVX**Ir|IYzP}njua>xKww?K3bi7VtP z$@!ot?x7P&X4nhR(W69+(#4GPSR$lRqJv$7$gwwKj;ftU##KP1O!PGus7k_5fMH1B zeHR{SmKaoA#4cJ)Q^d$uAnpyh;I17o6`Iy+sB#3r_R#0cHF9C&@fSblA~4(w`G&shHjaNyN z5Lq2r&6Xxdl4HlCD}s3qu23-5Q`_f9_mxtm(^e)fP>m3nq~oI#DoWcc zTe$uL4btB?Wk6qwUW4yiJrWo@1lZv=-C1+_&F^z_jwop0+Q%DKIuOWiCuuBGXi~SN zFK=?A)6R({olU0qHOxDrc~aR(1Go*Gl4VE^!q@Y#n@*!4cPfV;5tadaeMf;+fpy89 zwA=|gN(&vNG?R@Q@U-AqPffk%_FO!3W#eItdEh)v>K{K3nEJR4f6DORet~cf$V90;;P=%sA0;$uOawD>pT8(eGn9H{ z{{dzWh+=jl|BcfuiFN+z7qs_>~I%a&GU)4cWC7@Z#NeIHM9?;LOvp@(i!{-;JAy@2T$gOTXRP)rmJB?NSsv z0w{f2H;0{HfziwOSXK&HHtnXXNZjNT!lnTdti_~QO~lCC2HoE;>v&V+X$OQ+7^-6B z!3svqA{pI&`#~bU%4PV(VUg0cpn6$0p**;V^H>SOy9nAG4{21aCbCO(W5QX;44|8^%F?oy~8F9p8EAwjK!|I zw*BCoRPc3g>$&>HgaQtQ2qu#h7u#hmdreWMNd$@_c3plFfba7?GNw2b)=2>X#zEkd zk5rD?gnWcZ+PP`-IvtB=$L9(PuW0Ly(O@i?V9oH<9>8)DRR)S*h$r8?caWM>HjdF% zIk9m#U%u(zdHJ+0(qc9ERq4o+vy(C`|Mu9e+m1`K(Q(ZU!|U!YC-dpz+P$~y?PKsV{_ar2_s=0&kEf3J(6G+ZNI~}Y?f{KJ z!X=rtG$}S%?s!|1e}gU+PgV#r|J=@91u)~b|1KA{jfHw65J;m&~a=2__K?`s{&U1bce^-Ti1=leet_O5o{E9eBs9MZ2$1AWof1W6pEW%gMD z!Qp*+Pj3&j7S|KV_jp-rq(6R@papVlBZ5f6svF3VEkt^u&ofdb1yq*yNk){Z!kDXg zELBj}yQpO#CFP?$qT`Q`gQa znw!w5+%=llnM-I9EyoGeQnNYJnf$85FkBI_mQL+ErGc&|Ag&F-bSQx;)CabyV3eHYGcT;Y7#O!%wz34QS=0x zA^R>{>~O-CEF3bE&LWOWL!&o|f#j_eZWplP1ShcEA`3g9n?V(CV4`F0KIfQ12vusU zRQW=&rPi8!y$s~Klf@(0aD9?%K&zq;`(xYLLG)6PV#HF0Ci09SFFq&{W!i2TP@FkE zeA{(ZQGL5kt;?UilV}&`$B^p=6cdjS88J@_q-lBQWHT9>hu5AsoAHR*2YHBN{Ks%0I? z6m>!Rflzl&eV+v`#9w6 zF++G&>jMRwY#Mf+jr*K5b_gBY@2?^T%hv;3;92Phgx zu6&pcLputWFv2+awE>3z`u2k=HP{iy^eGVowY;eMz^rflfMraAIET*-6ZYXKBfCzSfH7vl}q05 z=LG=~NSa`7`k}5~iop*Vt#J72UsUv~ zUL--^PA$5MM;n*iw5?iFPp2Ez!d)^=^UHr|fPP+JNM>^P62>m;+jo~%k-}Ihh_)_y1%_``9uvQ5bJQI3OVUqJ!+tiHj zXY@PYIG|Vh(A!UhHS+uRm-@cjFcvgiy(&5qvWjK38oI#>WgL03a!Z|6t<@2`Ri)CE z2%X6dB8ub?vf%mXIabQ#KHptu5exavMI}mfif$%cv^V;7T*a{{F37pS#vEO>mEwad zCfYTiTuZkZK9LCP@m{3#T#aJY3=R6dS@-I+RHbC1pB_}`cd4M;e+2d7u!VkjH5cUS zmDHP#zYcKdd0XYRn-N3niA@(}7X7SmL>=PnRK89UHsg3t3ay`$izw50Ce4GUK?x=%Cp+hAn28nBGC2tFqv6B?1|hc_uiOQXQt1_rvU`L5&yWX z|G3?l*4zrt2*V+l_;9`BiV`VxIws#pRt(CmBJ9ClP4;K>i>a15&?;1OZ3i^T9ij%&5Kom=Wp-% zsNic|8oI%?`RISN# z;94_0?_RWf+hMEn*F`0jLNSZ>Q6S?Q)7dIrT6g&NH`m(DFFuyjCuLPpnY5E`u4^j0 zo#K)4QQgv-?uW20gZF|HqX@a~Op%i?JmfSN_(y8- zFu4Pa%A)gBPg>#w?V%Do!L`cLgR>ZhRNBBKD<-t<>J}2#wC;1D^l4FNRAaxK(lVaW z^+4|DH9ZS=TnE!q6YY+=nn(Z!PnDhRZqaj^!(m3kvGufi4W>TG`#|c^5iI_Pke!wJ ze-*MbGygY+m?iD=xXl&JkGeAMG34;f8163kHg3i}FPZJ;RueiHrAeGQ430q^GGOBU zn#@le-&jGDUgw*VVr{+R-l&1}d7h(n_gGAQxtSjSnzO>ZHLdW5QVPXE>7)V0%n(mW zeGYATK{nW3vV`E$Krdf%-ucMUF}7kyS}Cb&x#~+qEWVHW4OC#jg6CpD`mwC}xT`%FgCt zW}F4*oTAK=flHW4L7oz}GH=#EX4?m5l@rqN6^2-wcIhyfe(JUX>z9_bSp+)xog<7W z&d`GSNQkE?4RR3$mOgDvQTBBd87C7wI)r&@NJSy@AUYjjQU+_pbH*aM7@|=P|0HU^ zCM*mBD+EvOc&NxSeFNuLIl;GRSg(D^m=L^(X0&uCCmt3Oc9zXN@~*5%k>Z#y_~D$B zBx>i$^gir6{3a1O%z%$)5Jp{aAW69UN(daT$*vkLj$Vjf%HRY>;NqTyUg1{4iZPUH zLVnE<{|p8?O);`eG`i#Xp)!n%K%gIDFQ%4p{&yqd~7B+=rM$T4TXX7NP%<-jTwtzzTlL;iSD$WB}S+eipm%V`NaCE3pvU< zQLy8}CQ&FFLIA>bHePCJBPwG7CMP!qGD9ZF>wF>7NIBb#$_P?{C=7>pYW^fA`ta}} z3wIXy(K!ZQGw6hH)0YC3L?Rb-4^1c6jMYB8UCV_@qKY(y6*J~&QqB< zny?#We7RbsOm9{!{~tj2S;viw!j%{WdvtG|u2H>1ZIw zxZoIc^|}yPj;jmOM%F+?l25T?bFght4mGQRf0m2|a|nb1_{pm&&i)YwV9%J7#Z<+z zirP$o*EN{tt2h^c791S1&vGaxT40JEu7tN|-pc3aH}9p&!7K`aK1-*e*>niEZYcys z=3vVcjdneJFjn8GNVg9XKVIU>1RTF7G5nwWeP`2z!SUs8}7^bUw3*ZZ|(Ilsdp*bxkoeOvlj47V8J zPHl9~vZwQVz*&jDkZ@|JO!>mOW42=nMA5)}e1w!6uRr=*UAtHPlzs;x{rK$FN9K=xBMBR>vLbbjkFtEls2=*#oX zmpqpB1G#5N=5L_ZXvPSG4yd3jdxncIyY?2Ws53)0b#PY9rKe~MFzcxL4<1pmSQx6w 
z&deAOiI}?7Bc*UrW|@27BSX)(#YDfgYBsOUcn6&jD$VkVFKCdijPh{>Ueq);jia_P zmu$tDqW2RGf;GK4a;^DdgCD7X7j*3^c(Ooky6R(zZAbl*1oW+b61%cWrAXM%5=VXxByEv)17=O&&;{M}*Q5Dy-yF5vQ#nDMSAt_i!PxSpz!2O`z zF!F_0OI?3C8B<`_c1L}?jT@Vz*60MwlN$2!tMMD_kp_(aOMhsUM}g!jKY;M1;PZyp za$Ai@m5Bio3N}mxR~DeDK;`Z72xjOd?jVh`XpyfaBn>oVaU^-pmuDvwDxZWO%cpYJ z)+ndbIbR|S6l{l{#{Fr9e1c&b2!^pJv#-TqKp1{pB4d`R-9AH~j;J2X$-Z=ciAEDP z9wh7M#%<8iTb?~iUiWr$8-Q|PnuILysOODWyW;09kkVI2z{F(7(6y@Eue{z-6*$%j zY1^)(u2Qw)+jzuO&%5v|&w}@qwN?H6hl}+lT3!OtFBEX|CJrH_JKn@KzX@%~gf+j) zuBU;B8JD)Jw5{5JzkyiULBDc2tM+i;TE~=WB_q)^YBcLyH6Q`h$bmj$26NvvxS!{HK`8)8@9{zL3J~!%cE=%A2ujOZqu4&4+n3{hk1q! zrBC*{SW9#|^#MPqgN_0DDbd%$2Zd1J;!jiP1o-t8oW30NLEg--$%U#Hmxh%Z2wDvJ zg}QQ5*NHPt=$S%3GR!TE`YW3`O=+zXFFm3Nnd+O4v@BZ*sk&~x#ucP~Gn$tNh2ei@ za|H$Rq7kG&!t-YhvwMA7H81rDon6w(?76|^zrS~_$=ZvPX(^P9DUd$B4EFZ;{c4-f z>l_FS=+Ez92Sm`Gxf>uBFn2Scb+SL8l7RRoQU`Ocyp9|*;=Os z4}W)jfX=Bgd4-i$HM(n;vY<>yj)n$v`iO(HI)>KYDc7S;!)5(0eP^6`!Wwe#2)0gr zfGp%+xTVN;-FE)gSco+HU04N6`h#Mh!Kp_0ugtkY2m|n^b4%M}3ur1t}ui*t+WN$8x_}=Lqvw z2XHbTV})bI0gIi#&Ii)-nt&=)(LT6S6K8;hL9%ZHXVYTqRARU73ukgRORyFMbZAI+ z#&r}wiy;G!%Y)}6~lLwM)UaJb0R+{8v=Y^vV5m2#+J7{1kw&=&broaR%*j}a@t|a zBKS1InsY)}q;N3`kJ#EY{nrgR9*{I2mfLy{A6UNrBsSCo*(P`Zb?H1`{7&%!D)IC~ zM|wL)yzz&@_w_$VZ%pw9U0zNf7`;S-ujs8YQ?41pZIZ|MZ6FtBo%k~)VzXQle{#i_ z1iF+Tw%yf!nr%PH4M#PMF@&vhWsdgR7bBJ|k2n#QjTx#C`BW`lq)Acfl0hZQC+o zoI&&1w7PO#=#5~%;rZynhl>$P_5$}$D^((#!A$~e9GaYzs;n* zT8-mQRCv3!&qs)>+`%7i|3UwZ-hKEN{ExbOuD9AZLfFp307s_WDXKqMMj+pMbc_Ed z_y2!&V2F*2i{-zr*k$Xy$9=BY{V`N*pq>kAR%CZi%X3xPL*o;%qUv6Q(|MRd@xX8$ z)gi-7Y&^vBjrzs&8uLDV#VBzuxCIqhMCjz+bIR4!T+*H)@cUN{2G={iLNGPDOfFOb z8k+wC#TeVXdeh_8X$D41z3u}0!P8I2!MwZ(Gps0fO_6d5vUB~~XG8=3-yMqL>+oGk zyvt;BmS`np(g*aag-rM=BH!SI;AuqPsQQIq6Zugq{!QuloRWERl-ympA?Z-(*T;}5!5RU8I(!!Jt|qxHtz&TTt7}9 zevK%kp-8CT+xanV0qF^|!s&ZF?V&K!tmGZsd%lx4Ad~Wxf%s*daD4s%3Neg2nS>%c6Ocn%$BIcPGgUA1P2Yi)^V>)wfM|3g z^`KZX!Dnm?Lm_s-Bu8`_Os+-OIH%|pESbH&f@}u42|B@70VAvlb{2#Q+%4L5Lu1KW>F*RLEPfOU$EDTe5@s|MsbBU~ zZkOYV_f(trBW#NE2{c>3kU|{i6{M@mZ<(H~d?^-%7CRCNK@0G~DAy)`X_-$h53br1 zrBIBKKDv!4K5D>-nQox4fn$#XvCNlF68?&$h?ETXc~U+EjA<`mEo*wCkp<%C>Gl2^OayXhY_bfiEltE131$6r zX##dKxLb-TSQqKWK>z&dXPDn)O|#XK>JiM@RA@Ow#pn;>ValhQGhm1i2aizJUlnx7 zOK3ub`9RQXTqZa$M<}F9pi%}xfP-Z*SWA%QQ50T*+KZ@>{I2%NF7|>-fs*&PlrqSd z>>1fqQG}hiu=2Mhpuke^pZWc=8^o&lu>!Yw6WgT}MDdwGz%TRy$s~jl8d`2SzhXR) z)d*T9mCRZ3>}6DvzO@hzL=NeNv07QDYW-juASUjRY@`99Gc_Wra+EF^CAi7(d@=nT zj{RPAh?pWXW-)1tV)geCVLBlCY&F2lcE!{euavXYWQhkF#x0goSIo#@NPDll_)3JB zYoF5^$qy^m-qcBpu7s*nts2>W;i#U|Ies(co4yRSyq1YA4LPLWSdLE8Xeyx z`=fIN`t@i>>BX6(tnF@>;R7Znn(bsLnmfc8b&sl8us^%6?{|Ff^Je4}ckvwvDYtZ} zaJz3m66KXy#ldhWa{H4(7r>EO1Q-1T_BKxmK{J?r2yhp~O@dxVi!7M=4LWxI_GX!T zu$U69ylU^~D8>xO^m@CsMHQXO#070m<*-sjwEX;YBzoq)$5OynOC>ZX9>8=X@5`E5Ax=Hj?()5$3-+GQNx^jKV61F=$%&jlo;U2of_ zu-9oN?5w1a{9^Sn&z$mL852QJr}Q8A4ifecE9#mYz%PDl_H|V^PvxMY@L<}XCmpv- zSi3PF1I7FM2%Q@JK3=)o{MvW`$6}ll)CIZ;?jn)--Gx4ehzD7%zqMSdI==HX(3!tj z)Mqla$|TCks_FC^8?ce;BHI-F;tdr4+P&Y3=Bk=Bc+u-T5aCzu)=k~g}gz$(OPiI3$_mw29@~^nlvXx~=c2`2ij#t^%#hYK$hy_{{lZ$P0Pf zq>e^=w*x!JmqfTxTU~hvF6-6U$P8M>T@zwwxYGrXjGPO&sUE1c6}0YUk6U+HODyiG zoASfkgsTxV+|63gIdBFVi$KWsaS2Hx3cnrI>YJc9 z?VA3oUwH+Vll`%5qwk|df?lC8!SSxX`lVSLaq3Zm-*<+5w?7}CDVR6YI8n&eYc1cM zZ6|6mC7QgJoE_zakS`Z+psP?^jB|+8VA3G;!W14H&x`2YA__2lLz@jd(S+y5^=67I z^U2ukSM)nMs#=@Yiye^TrZAYkxF%sg;rgEYUMZ>Wfoo6YqreN5((*pB7uUJUj<*H0EUEAr zCjOWY_&mYac2E-Lu*v{Y`XL||tTE#a%4IU}YpCE%vb20AfJ#zqrW z#WH=ff0moH9|_(s=+>l)m%by~3nyukFDv@G=FA@+GhFV3R}0X?{82X~i{FWj&19gw zhQt8OCNF=?kUu@zmf@#t>UTV+$=BfIn0mN)foQ6I3y{PRcMu^%`?2D}`5Yn0;l)Y1 
z?+(50QF()v(Hv89xdKl1waILrB;D8kr|U8&qh_5@fdVH!2TZF-Tq>UunM;j<%jZV{W$jJaML4Ql7!WjJy#doAaj)_^d@UiQj3ECU_v z6(vC*#;M3x-iW+0Gwu0TH7A)oy&XNu0jZT&HM}0-IrdJ zRz1|4V=vc{ogv5DgVoDpHw#V8onLZ37MIIObi3`$$Y_>K^hJWLLqFp|hflyjB%{R+lX?CB#v>g+#Q^h!lLg}h_qh$~Tp8LAjr7F4)HD4-d-ZTFNU26H#(U|o)#BG# zPu`oJSKxk2dyBWNox^EaB)rTTe*%s>FY;thZHVQc3d=u(AOQ?lx0}t|t!r;gv6570 z2J>~+>`O{F*6hizy|we))O-(-zwu{R++d*N!C(JL;eF@gm_Bx$`Uvh5pduefr z2ye@3j527elSB~Ilv=2Cw8XqSq;^G_ha^;PKY;q`Hw@VRL#{bM%( zIvbqLw2`3JMn}@dp3e*_#CN;pK7yV(rqD+Sqq@gCtZ#M(A)%uC6 zt@GXJN;pr3fI2?!jMDJz>K#p$+c&)A^I}pAyTAny$5{(EdE&XuQd-yykHH>ddopLL zGl=36>)mgyt0$L8T;MLN_}Rza7%@6q9i2%pTIMSOxtOG`*F-2NbvO%OP{v}v*nOeXf;q%mYP`-5o&(k~F)_CH0yod25zFjgM6 z|F&l}M(cCWDkqxns%HMZcqx5)JG!6qfemfeL~7IgieP)I3SJajQHwY&+GvuG^3E^( zCl~Pi;=4745`qF55MREsPM)t@3kZJWMN1d^3s7-Op33|6+5zaGK#!QKy;f?fR&$0W zTU2saqa?D?+~)e#BA9%wrrTu#{;fICJq^0X=C0}RP{EpaudDx20YHvWxS2LGwx)#& zJS4nSS>X6~WrfFDmqOI9?S-41z>ure)2iFNFf0u?4DM^qN6c}$)es8~hg3OKY8wM2 z^z*J2k&&H3JPg3&z;d)NI2m;ezYiD{Ihy*5dpyAhOXJd2C$-u-RxF5UJzF$-j=(q# z)V~~q>a{{m)D!M#lVh?<_Ehwt=?!($4on2}S!PS(#R5z1Y}6w& zAqq3#Co#lC3BV!yrDWkUgsP()h9~G(GuIjcY6r^+dH=OQvq{w6(Vu)1hmlw~si|5? zgh>KCjv=52^!#aC{r~_p*Sm>5zZ&;pU<+1m$gHMce9yRdvPF8pZa+mcf*LJMwh{mZJ`r zn=zvLinVy(gu7!R*EEuJ@$$#OM5DarQE7fB+d&lOe8CyrnjuTh2WifCW_HQ4n(PSH zSW5_@W{?JX&$rMiTFNk`kvUZ&4awyHX|(QF0K=~x)PyQqsjv_L6%a@%Tda~zR1a$o zj$h!2O&i#6JqlG{AH3%)Xz|^txQZ!75Lg+jHm*F}X9t`a7tM(&hH4gd)jjyoT7erY z0Kq`ThM3&oFg_#K&h)@ib{pr0^yXj200}G~ZY=%d+z*6Bj3OD>G}fr%zI})uYTf__a*lTK)coX-+57G3 z&E^i?5zZ<#^1NaSYc;NfLO(UIPL`K1QR-CB;)?A5+jv1xzR zK?&D-DC1ZjEpJS)E!$Aqs_#0!Ql&)|V(QUmFsRkAhyu8F_Klroq<76R&~`eA;58JM zy0!@`+vzkEvKT$KduYBh%lXD$U{vY9=PmCluj@^odK%*e7xjqXhVA=6#%cVi(){CF zIL?lPzNwA5lrLAuU5wZ`*fEraM_8k?8sN-PGpws(Q-G6Gse{}yiX7wkJg(dHP_P>t z?%OTyT>WZ{CS+%gu(nK*deFJs`N6z=roZe)FtqvT!sOSlowIzT8or_4>GWjn=U-mI zq7lB`s;jv9tp%9?Jac4|(}7SOYs;Y9IDF|Ufik>uHT=*moK(d!PD-pMtI}$x_Vk+a z&@M1d+1K0(~@yE!{^*MzYLFVHlw|=X~yYp7-HJ3watDnjLw< zQQ(yX50N8OE4_XmCP~xebkLjKNbhFgkeLoX`Md-t7IJ}$9dq+q1~A$K9#v~#?+0J) z__iXKFWZ8;1a@lqJP_FPhP>7y?Bh14^@aYa))>tec3ViUd?{(U<)>_6dD#0 zHGM4mBAYFx8;C6{6Y1*3;OP=ln#mwic$h_038yc5`L8_!e5p-w#NDq}RYj?LTKVOJ zvpHAFRCg2r>fTen%hq7c6+2P3zYy&N?5GNJYj5^7tB2}$4$nR4DA(Nhh@LKGD470~ zHYR;3>Y}dgNQ9dx?=IIO~r)0xPu)c*1V0e-{J~t_LgbL5Y7NNipgZ zv`55|M0rc*z(kO?@UPtET0Pb3rFm6{q8tl-brtQq&kSn5l>EHMEDI0TA*C4S^35xS z7B{q2USa{9P3^~PGJ2j9pw5}~sODW#uBiQ7LQ1aJoBKe7R`S^gVgA+-HhI8)+=Cj+ zoR&23pkU-F#;m*G!cd=z4JDo$s!N#en0fDzMI`OU1$%sJhf;M&O(lv}S@qbD+b0lS zP2rbw-uc%%Q@j9B3)y8DbX%nDioAt`eUg=fu!vQ1j75`OWWXmo#Gxx5JZSdgzFdGA zC|)JAotYF{Q7v z4SQq^&fDYXq;zpDit_4oTrgAbVuZ}fyM z@ej~Et4@J$=^$LAb{oDy;HZXi9|soak^1K%-0|%(otmo{K3HOmb5aJGlC{doHYk_V zCnwZB9Xyb4suHP14*zYiFTIiQ{>5>N%^;0K>~R*3 z4Bfn{=&Ei_-ebpTr!|v@xoo&!kBB4mf7*NNsJNExT@(!t!8KSQNPuqK32wn%6B=kF zxVr>Ga0sr!gS!MLXdt+2u;A_%>@{I0JNw*ye)pVl?|bi`osq%nRW)bTTyxE;uC;o8 zUrj%KfM4XR!F0BlgYbR!m@M8ZPImFAubIa|s06aZ`}>i7SXnYjdWz;JaH3hDpx8jHD_A2*HJ!4>T1T%j_}hTSV389Kl5>6yw&5{WW5VnB zAkjQ3S-LfFPys($c!^>@|6%IeH=`{hox>pro?}Hw(Da`&Py55euVJ5Y4Sf-8e8#L_ zKk(sY%y7xZULXDns2IZ~zmcj-CbShb=F(gnw<>Eu?Gp3mBTn+D*+?^QHT#ED8jL*1 z^igY*c(^vO=G?L&Jrjvpy=2KLfJMQxTr|CxPW(WxFk|~BCZa>1(iLWG)Vw(V#>kyS z;1x%$ule^!-!UDB9_%t0XOW$~6nDK4Hw|(z1+PC=MR7M=j@ax;rB_dutS~;wIgi@@ zZdrBy2?`dyMaSOITn&e zl?d#+H2^`Jm=1V+m`Mg0`f{W$JOvfF#k>GB6*f_8SbRY+wrFe`5TBhXmro2{SSIB2 zLa>o)fd{+G#hzZ|t=HO>vC%H~h_Y(6<(Rta{G$&U{(i!FDq8)z3oP>&nTkv{1=y5r zSiRu_?@pwr>ioZcic@{C>)Q4y&*%XyPRF&YuRrxw=HAB5c)1!VUF9=%ZF@(_^8g*p zvTh^iF2tk*^i%YY_6`ISjbVE)XzWX{>nG@D3v|z3SuNoZ;gc9unCO1XFO3%^uAY2X z^X9zf%}PP5aOdPCmzczia{?pHw6h=eTWYGI9Ix73h2c2i4+=oMASYM4LfoJ;+@Kr! 
z4)UtjSVb8Xy6u>tJo*k9zgEp6jWktJS{%~jLS&SDR;{7@V+BuedYSb{$&XwnilzsC z9m>JCxs)orsu*qzn`H?@z=U4zRi>k-JrzAu^R<3D&}TCI8jV7_1-s6jr;kdU%I9mx zlqc*Q1}^uqo3zKDPxUmayOdbDCb(*!qzt;L?E%IFe2(FGLzUKEp+Z(i%z zdOnDzBb|n@82eW-3dLwQlvIs-$Zy+ugRW>$d+En%oLAQKVQVg>Zj} zuiAA0!HEbki}ztiSVh0R{M~xgT0d@bzJM1cz0R_iKVi0~upMp8NlrO_t~qSX$&Huc z%N99`4?}F2oonaJiELP+dgYjEL{++mfY;9JeKb9sg@~t%SluYy7-QQduAkZh8mCt1 z1P;9(>)^HPcs$CTHCa;kfjbE0&hnF5o zVmt;D`KzOghN3^S7GfR@%YSjR%Y*6%;z4tdg_|!{Y;8%RcWLC)kZkfLJKyzV4ne8M z?_3ysdr}Mpx?`Qz5>27=?->jfP)DU>WI_Wnv5-UPxrrO#9@qK$bGoMGa5`uBWAbsK zA@2ubeQj2R|0HbH66??^A8(`vONLENkfurv@C}FeDZkQdB*D=pmdHGu1fE4EYjEtFuWtbicDzJaI zQXd4#gJEKew!GpKAFk~~Az8W@T5N;Xf2F5MALV=^C*E{3wI!~HdQ4D)6ffhe^yUYQ>$Qx_e9)hoCFLq!H;co}$xg3{cirZwKfaBOTR}Ut zS&V$j+3DetmD=JN(Md&;Ct_7!B zU#_qC{fy&=)ACL)_OrgZGX9?CPsc2IaC==BZH;*N6EuN@+_$t}U1dDV7No%yO?9rb zK?ZNP9W;>&>=V&sAI`%q%2w9;VNF`tMnK8??Tz+gVO`Gv{5l?+>APx@_?r*p z&qx|dtuNM)C~vnmE2k)y9F(=Pg<{RY%WEvuf2Fq7KRtWqxbxI?-zAL( zmp_CC4x!UIyjih!zVUofK~>vspFt6}z8C5S4we4GqlSq@T3`)Z4zr=0>&AR40UNB3 z>N?D^EsV18pny9uYdc@{i|Gt}>*8~Q2ghw}HrYZZ16;O5rNA5E==moMB>Ohq-$$lPY!M6#uG$pFE)KU&)(Aw<61IFK6^43Y(2><5`Ou?QMoh| zu8-mNa1%dOEspq5o|Kkx?BI}qXnOK^GRr~Ve0jL&wd*z-#eBIRHSb3i;?0jLZ!R{@ zF=2vb0;sM8wYx-e8_?~M!k7F)R;HEVj4_^lb@Ka_{ z?rJ{ag>7l=dxAw?WDgiLm`PqoRGufhp@Gx2!r|ICe2O^Qx>ZjLhW9rmFAd^LqBxLV z5I1~_3qol%%I4V^5=Os?@X)DcU-yA*zPi{T#bfs{4ihm$Xxr+I)H3M-O+(;qwT>lx zFx7T=Y8y*DgcUY#`Armgz>=(j-!&2g3$^t7>;+S-%W5R*kZjnXuQ}*1qN`w>6R@&4 zl=o4QY>~8-d82MCSqXj8h)!l@+f$kqGZ?5@JR&=-R9xwwCnM*11y(=NMEWTfrGT^ zb_zUaQ9vfiS2>{0t4<(;cPY9Lrjq{!DvsHR_eYFlC-)hKzA^!+N79*SV{+&Zc=d;+ z^{kM^FK=-POtKzRDNy&Q1T0bRx_a%{DK|cDGHery5S|G(r6!=&1=+A#>g!?8GAex^ z#^H;35-8(m^R>7EdPC_WS!<1p)ha^=HHND%-8Amf=PNTvwv%j7vi=%Ci5 zNFL+1lq{-QzOxly{N86e8-PRRAV;J%tgz|)WuM|Ii~O+>i}cQLap$!XPeqC5j;mc` zz{?13=-7CHHtIK#Pw|5cpCg69G17h3vCgfu%Xh&o2zvo_&o-E7-0 z^>8Ws7Wqv!E|dotW%rfd;hRgpOZCW#pbdEC1Dg1?pBeWD+_!dM|Khp97G2~C$eyLk zBpLkDGfOy#%M;x@vqrEVQfu@^sJ4+x;vdGA2(h;+xga0;=s|MWUvR7pD#D?@=jT75 zwu`aI$XwicU3u}&`?=|v9yy6fv#-_4Os^k*s)+jJ=lvoeseU{$9DkM>!{K^}mbK9? zHfp40Igr$Vpb5oziNh!b4#|)Wr459H+Cm*NJ0AmGh?z)B6|N~1@f=$(6Jic3{OiYSno(K%F@?KrCrZrRaG|GOo*+ZUvQ&6+r?r^Ou4kNlDd8KWRT4i zzzNNKNN3xKYvYYttfvi;9wkP|3?_MBO*?i(4z=0hoGJEHBXs*Q1YNrh=(0)N zkRDjOd!SU`2#)cs7a(kvq~p_~MUK70(9M=1HiPSl3%B%uHE&zT5R*;B^fXnPqu%F7 zGZrZltDt-L0c!U+(px0O)jlzFFT8CrEfH?I zn75-!{ibIq7|s*uzxn(ri>jp%0O|V@%WmRrvhSShX!xVIFrjeyGw&R`c>8jg)Ccu? 
z7d{HYpnYzh>TertT{@_quXaDd_CWt0nO=crB$0zT$U#@~(r3OnclleeuXy)% z2nWZ5aCSw)#IK=oaD`;)FVe2WC$U!=0HS1Jcr!Z_ge=OAg)a(@9!Un5S)@^!A&QF- zQ_mC3HjnKVLY*>v>7vc##3akCoi6CI&0wVV7Vj=9{t}Cm@G5NYc)4G^pD{)HOSg)l zxx`$>Yb4G+vXW=U%}rkkJ`rrQckb*P3+UHzMvd?H)%m-w&&-Kdh+7PzSm)pewHkK1 zw7u=EnY}pPGJEDNJ4KsUAK`@1P(MSO>5o#%>XNYyW%*2# z7)~E(^(oE`9I&q>6&T0`_(x`7z^xk(glc{>UYIFf@)pB5XqjTrCc0~7L|hq5=GHnjD)as2@oH$vAAqQDdo8JXEx7gEzfqdlc;A}m^D7F z>#3^`K;0iK-*d{JDljdxA$jH_^c~@+<+P+f0*w20&wL?E>j#7%6M80zeC75M_G$FBizF#KR{g4;(Y6PW`h3+MxK1(t+Q9< zoa5^#wt?nqZ=yBn!8o7N5~?$*b=c=zO;?->qd1)F@+3V0y(|r(#UF#=h|+rrEX3^# z+@T8n{7GFZ&H^gVLT2l-B;OyJeqU^>GYiK^#=qbYj$A50B^8ffc+`r^a3W9|pyYs` z`UOezKq3+?6xyPG51e~JGnRm+2@l%jPjV_6801dx$liw^jyQxm2zoOjg;t&4z7!j?#m)^pL&ujS%MZZ{u%QkU^sV*5IYRw)al`XaQ^ z7jj9?IWUZO^eFhZ)*fY->ym6p&BdgiU6MgRfKuW|!Ty`~s%*dQRr1HiA*>wS?7v>3 z{ud;}ln|0($~%%_%6~^PO!DAB^&&9;|DNUdHx^dD-zjVb zgbZx0^g;IIqNaBCrsn2g+dBqZCLlSjx~Zk1m6IJE-yQYsFUs4W?ff18>kpg$Thk$A z!oL`e|3Xmwi!b?Smv?^oMLld~t72tkE+7tJxCP7JapaOSL#$?Eh7iyGWGDVZdpTDnJ$y{``#sC?eIvg~{!1Hwk{|y|8$bwIGEnO`8-6e( z|LX<(&W8Li4g9Z0{K2?=$DI7Lo&RgT|BuL|fA)Ik{$Je40{=pd{DU0$o;LYkUH_E_{wv{sqU-;fyZ(iYn$ijKpLg#BVd!lHNzxL< zo>tXo)HlzsO&viyX_Zy=jK#x?j*n(oKaOi^!rrKW3JixkN^2uwR~yTS{6SvpHu5&JK|$iwJ=}_`U7^;6G|I z5Yir6JhHz*IGZ^>tsIaLjlBc9MCTB6>r2ac4es0LY}p~#6qe5V;E(D*nVZLWzPh;5 zwy_4T^U7$OAdwy@KS!3uI+@C-h#yD2#*Yg=ryzxi%|W>)9I^-O1vX=LPN zS5%$EqsB#rhhHh6R~5xiN0lDFqhxpqX~?9aDo#ptP@8&;pk{F6+t&CZ|K^4>OZPki z-Bik^Bh6M_xka4DzWLYQ{!(tyst?2EwKiS7-kaOavZ&q}p^_E$?D^b8R{k*IBpK8t z>0K!%6SGJ)4rRs5j3vj4Rc-8nCD_B z^@MKAJu44E+A~E|9iIJ?v{*tX*#0rzmO5Lcm@FJ;2f=IisV?6FB1so+u3DhE-9whI z&>8aH>Rhl~c+TBH18*-^gKP(>a%Z128y;a~Y2kf|tjG=98beiJ~@L+vR5V( zrA2<$aJnv5P^lw4Qk@t+5w&ih?tT=DW&A69!!=#{Lc;~| z@sB2J;g=kknPb}K$FevH!5}STBN7j+LW-QsAflop?v9Uj^|)GvBF z(7B~*uq-tWhXBIiJ2 zGa8?zj@Z+Xh!K*RitJ5D$VQA1)rnrJc=;kLHQ|efZ!=Ay)t47ON{bkbwSqV6_^Zq@(>YEIBtN`AZ2{XGxtBIC9Og0&m3`J~u>gr!KpFMC#eq>;8Q=G<5cy zPdl-mT27rDtwFE1Q#wH1p~j4OoW1-5s4Cs?N3FBGmwvbyUgIbgol*85QAFogT1KkI z-74U?$pX|^iV4B_U(C`j-A^giNidW|6k&Z|VT{~*oWk_Lmhi&8UG;qCHtIA?aN*40 zN!WLRc+ALv)9ch5Db^0>iyAcW%&wlcTHVpym8SBWs6aCxafSUR`O}qDNDeD*DAnKP zutMNv|H&5u2j^enT17=Dl8OScVdCkPAfXS;_Ye;e3G?mrwbI*9=1*crDsm`sU(2Lf znmX76s#PtNECr9GXveYZq*{$u91h6q4$=}skObb8e7>xDh?5TEP3 z9aIhHMHkJlrrk?LIC!)7p0)ybq0st)hPVK?NVap8g(C#bfBm&}s%j?L)sxu8`P~YR zhkcN}(iJDVdJ7Qd#ox!~(WE>~f1x(zMHxBS1k4DNlvC7J7EDT8R!uihna@Zh^LEN~ z%<|lI9a};5sjj9}vx$x_EC`2g--pVwOgJcJQX51GfPcmopPRm*hJ%)Lp0WIel6d>< zWKEc~+-hYjerCxd&F7%k7D(3UI_S90W8KQu$W)WQ=)w6hlPJPr(gNyP$s^dauR_?O z9SY2I<0uaGp35RSQx?dy6q+q~gUzJ5UO4Uw(p@n9J4~UXr`WNwj264C>=LBdaq_2w zdi|dieP{NzCQhm))Af`-i}sN(e0Xe4OJj3j)3^_ws0F>C0f?L;%Pu6P!bPgK1@eEboqpmXaKJ!Zt^aA>P z;aayW-IMY`&3>0v-mX~fNN^JJ(FNIKsqzaK)rhrK$4$zD$fU zy-8&l{86Tf?iMnKr;t9z{o6VGx7`c>hdG?!$aOFiWz3x4_mKeVar-74~QTc(D{7=v|Sz#YpmF#`L@jzRC%zQx7 zy@s`hUwR=tELNY%1@&d^Fm#P0(ZZEK7^%Ao$%7hu9$xq7Q)E&ozzpS?XgeC!e(JSbWy_(W>@LqW;5UBsYXcsYW8 z`j;&JLHO~jQ0qs5);>!G!ULQD zZUdXB2h(aoa^k{h6lGD2gI_Q6Izsa;x4R8=AHKmSQngv_a~s}pGZ2|g-hSq+D)OKS zh88i?LCPlpO?PfnS)bSA)L~Gu63y+QSk~$5fZ>V5aDZ@U?OWYQ-p|EYLb3IT8Va3h z@ZeDYy{(L(`S^2x&UhX}4-plj>Ec2hkzk>hIpl+g-o&cx2oeL~lejbiw3ItvR=jU! 
zaf{D}iBQ}a-ioyzsZvOI)oQn19fz=DDU=Q3vA>trC9$Sb4tpb^ut!F#Rcwj0q8VGv zSc_r&bq)21Cy%qiOk?SZrWh67EceQ}O;Z*P&CnpnC~?vWA`)-jxIFa}sZZb!^zv$z zM*FcEujxnhlfo?0jdT`+D{gNE8g5<)o6IyrT`{DeYD4zxEU0=+{I?;`{vS>6+`qAG zNe0S5ruVNv#D+`U@6GveSdx5?t>W`l&@23}ZX+#AjqPk|(ePwPOe%+(N`{*1NIL8a z1J|Pa0GrOm={pV{4v&OzTF;HzljSE|i7K)%45m|@r8y0@3&)x&gGsyv;R2^UjJxA$ zx#~A5Bg+ClYEo+V>go3Rd0iq3t8~6YSa9e*GcAjS>9X8JX-Mhw?9F ziqF0abyx~XXBKEjHOV=$HQTfAjIcTix(P>IRGuMNwa?h~Z|q(wQ`M=) zPpw&D<$%-x~l2<-bx%o{yp zc-z7%!6JyN_ZsjPf7yS!$1L8Muxujhi~%Q*L~AxcK&y!?fZn4Y<4tFN0Kiq!=*Bn} zxh276{SdY%GIqz6VjiD?&w4Hu!wLw=f#*Id36dJIuD61k z33cyRk0j09WT5!Zyuwae$;(f9!kk7DM14+GW4fb+?EJY1$*f~`bCTg6FD{P7!yRSk z6gesk>$J@nASva3;Z`*lHX`M!y}8Bs=5XEQsq2-*)-RPvcl?&k658wal+y!9Cib9* zwXcX;fi{vwpdNo*v?8b0I(M9EvP%U4No(y@L|HflJc&EtE?pL1W2qgpdRKKJ_)2B3Ji23sIzrKh0ui7TPCk?JqSy zdBOjwSh(~6k9P0|2?$ld`?&0HBmX~w*K_=JvR8@j2_<1h72SK}heaTS<)u#$27+$$ zpYlEWR1>sGI-?<2z&V;D89V)*s6umok-(owd|uWqAZ26XM9afK>G;Nf_2swkQ&7ck zn0-q>TMn~7!ARi$IV`ZC2+*CXdEupX>O%E}yMv&T^wA&_%O{%+w# zCs*9kbQRv}Q<#Fvq(euf%sOsD=1*&r{2$aJtYd&x{)Tqp2x1Hvy^I%^UyZ{-d)tq( zF{hG_YWFVPx-kiJsp+45kkeAH<_&MsP+gs{Mih24AwD^CxUN%6ZPQAlo^E;;J$ch) zdPeeEo73LtOw1yGZZ#J;Kzfc*`G)cl?ldKBLp+~%_QLH=d%SsMM)L_t+4w=ThN#dk z)Xm)&j5c)Xh_Ss478gapu7ckSvWsS^m74r_E={*R-#xAyJasy2q-Pij)L!N1xoA~3=D0tWC#RmQQ7vIjIwmrVP4q`qRSo3{t;?^&C#wUPp z*jS*{+H+9Q+FxDl@}h&0$nZs6a>x_@H|=sC)4->r-?HkmcY-%g5U(hGb`j&fhZ z;N8Sixdr9DMWuzMh0AoH6MOJ-Zr83nRRCedH!Rb`J>ZGbExF}}tz3Rp&3Er%nxkZW zHEJKmE#wY-+h_(ZLq69}Om*j>__ynChrNreL_aLmb9sR{x;_jM$EHV#P=-iJNl!tAgRI_@>uBUh9hBhQx;>-LqfhYBcy3tK{MA5 z)6k4^wr)Ze4;Xz{AGv@-VnTw90$4^@?t0W9x(;!eks9dNOhy3bMEma)J!(EYBnv68 zKZTKXre=IN%;_+WgWrRHKh(Ws@yUx40c3iS`lQ6{1=#96@+lriXjI0Vun;JOK(flR*y_1)B8(}Mb7Ela8?T} zz@(jfZOSaNFygHF#ZwtrSx(= zI}dk31^If#WE_UoJK1V>e<%Kw-k#6rS3Zt*MX1qP!Zw z@=DL^cJ$qs<~a7*afMRdEWlt%WQn}#kcYos*1@CUjeLt4^3q$U^~nY=I4?h$(5)3e zy?0ym&atZm_$if_2R5lr=@T1yU0yk*H3jc%6y%wYdACSXHD=V2QbaGGw!7Bs10M~xX z{375+ri7Q7!rvw?CB$}Bz`$WCjNt5OiS}-#Kg>o4_qQYAx0kU0!%Cl%`}fzJhAKUl zGr)%pM^8^p@CV;MS8^vAp`xA65Ddy+PhW3QSo!Q%$W?ZEp6_YkAAQv;_LU;8qQ2s} z82Xb4lhfCtHL;0&2zzIOliIJw-mslW$CZG|D$F89&A=Lk2@h$AK|x&l2o**o_%*ZtiThRkH#9INbY4OAM6BT&R%2GdDj464j$VY3kH8E+Re^CO~Syyk_ zma4Um8_~2l%Hm$~Y+DFje`rSH;h2j#?{9ds9X0`>Su|e~b~IjE`|e4c)G4aVE|J?-sE99UWIZZ+#|85AHQLJ4EGS4~OswxX*D7mS z)teI%ec5SZGJ_seg{M%ZP=1kVWaY0rF_4rh4Krc0ZB*9&q`xbaXhZS=sd5$q1dal-)jYeTS5R06C|u$+8%qW^NCI_s8?JM&Iz;gG&_jC=!B43)kKej8EA*AJf=qYaYTP7n}VN7emB!sW#(xDf{n$W~i0# zOkRH*?RUQw+`ZoC;{!nQRKT{DkQ^X@DA>`|0IVb-4EXmWfQ6l#6Cg*<`sXk(nM;m! 
zQc;<}^B-t*a!v084imaYF#)}gnaCf(DH0exAn1l_LzOiT$Q2aD#Do@mm*x$fnJ!rv zpie=YtA`=^P(dFUl$&cPJ}&L<;ooWVWrIeOcg{Z9+&`-R%o}?57S=$km`XqrP#Y|NUGib zEm?W$i#hXIfV11%caQbkCEDQ$b>y-Gtb#RIrQ&;W543H}S^N{wp@OJBqHq;DVf{tjpoPiPUCg86DNy9Kh5uJrB2hWzm=BX3G-A1@k)nSyW8d za>WJ{-IqL>7aUN*#y1RvHWnKhl-h|}+FC@3gbALCxwm!aoYcEJl#R&eUs(gj&-`~v zu9P;R^m>C{Rcv8s_j*F{e1su)y2VIZrGTOm^Dz5#(BWe8%_R^YQOP1Cl$Dh=w1y-$ zq9*OrmKVn6 z0$29A`4mNSO}c9uxD@MM%!|E~S~KljZu0_ff!p$0$z~C>C#g{E-H4L2X)w!c0^S)9 zp|M;PzMjlquKHrYe@nv8<|*SkONGC_EVd3jkLVA*Bw52EkTFKOAPhG9W`;EA4Mhcw z+Xai825tYE0tlATSqgLhTJItHI1F9@!WVB079pldRAfQSN%&JAX$9C#p~oiB zG=d0RXiVNc3p_PQyWR+Ta3Q{HdJh_)j~>E-C|E>aFH@*WqOf@Pl0PuSs1*`WAjd%$ z6R~-SfhAl!h?k2cG#zb_!vt_lZ%@L5t?SJs!I?HQ7&quNxU|kmfcp0BP>TkLk9jd3u9ruKc@-)f z*N&9co3O1{zf8lW5PBSa%O9){KMAw#xy`)Iy-n^&a)~F=p7*WSdbb9V1S8J>X(#() z|F;J=*flNPi$rON~5X_^LA=2fQJm)n=wM;)tfND&r>d<;oK)qe7FwX|0(ff-qND6K|8BbqVV zwVS$|3s+<;-Bl4gS8UWO_j;62lW+=Lo?fn5u3BzvC}Dyqo3twSJR@dMZt$(iUK}mJ zMypk`+(4=)vW42tHpn*2mZVm=R;`xTc5h|l(DE?wuzrPinT#$C=NK0ThXog%UPmcj zEsK7dz8nVySDtoG-6!Ta1}0{l?vdK2a-yo5;;Dk1>RgGtqKlSa_P5-~G^K1?HL!F{ z#;cNURYqn0obMq_&z&-8hMunT9}}`uv&*xmYZYmInr^5Ia}97!aV_}%7|rK_$YZrk znM`d1ab?vJbyoEX&7&r~7s2VcB~|hT>Sl7<61*~QNsj)fSz*axadKgD75yT*<;A|1 ziI(qoE%-x7MF5I>-gB}Z8uP->UVM_ieG6@_XWGopr3Wmvof6>28l$bM}gjg zDi(7w`?@-52Xfoi{mQa!-W*siy;zceg7-w~$;lH88L*6c{Az+@!p*B}tIsS2EcF(0 z98R3`CUd4Ywh1jP{pG$ryIKki?u;Kb?X}%2E6bO)KWfNnnrew@t7@aM=AwFuGcN_P zPft&|W}cXCIdQ~tz$9UFe3~ShelpE5CC4o+w@$Vmwv)EzSv_u@YERhvy2`hvzf?ZA((v@#=Azd6^xl&tyl-k7 zr<1y~{QZqn9-rG<4c>RU$XDi6uUQU&0AN6@dgeqgaslp^`mu9VgKxvr#?2_m3mZ!%vprKUdm)>-Va!4PNkOsy(>Hc)ypKyU(=pL7D+*@S_H_A4 zO))hea#xBQ%Z?`&nBk5;DdNYbChQ1->@pjM9&8zO(|l|&&TMSZpr3DGq`_3^QMhI< z;YjEfWY9^sBGW$I5&K#0^Hj%9U{a7%03bYdwa)r!EdZMj`xVwI)-_f*6%&;ob=8xl z$jm7D14HM)h7_Eb=)&mND!!vWqY-RLm5rvX?pB^6Jkd>08kGfcuVqe<9Q?HXG6_!! zRSB(FMOfvHq>K;Bzn7m>I2p<>*Q~iWqF04g74xbyI@#^6yXKr!5vQRs6W4LRGVh(| zYWiX@GSv5`FEGigi}lOXKDS0{^XKYKwl~>VWksagNhnmv=kRSa=G9~75*BNgd-I?s zgWE89zwK5tu_t0HVq`&@9TQC~*ZJQH{N9)GUOs1d4%xzY``ZrD^iJueNuIr_+{Ps7 zJpT^(xV*J|!Y%Xk1>;M#7Zu^9@-lFQ2J=R-enWEq+y`mOrP+@8s>F zx1YAu5=*p_n2~tqU*o?!?mGH$!gL~r>@!I}*HxuJWz(72=ZR1>WHd1}vhe+|5E3-L z=@FB$$;qrym0?s)l%$+6G1B^r&@ImQjBgpMiYmu5mKJLE&nv7lKRx1E^vJlRS~Bc$ z(wbMTR-8tjfxu2r6T;5JBKr6HUn@VAb8wn+tn#orzK$4ql~d5)l|sk;oGZ!8aZ#tg zcIqf^EeFu9ORyET>1k~<)-l5m&3_%&0MM=7_Po$9QQoV4>ZmUFvZ~f~lW0k+)1hZ; z#{Wi8c*R4Z~l9`FU!Uk3#4vW1Lm#yX5d4&P{=5TC06A5F+&rx8JnVO$ zd4NF5pVEJ#BmNWVpSe?a0EhoT{x6y2{FFb^&<#LfBQ`@rFe5XY0hp1Em7SSU-$-Ad zksWMgz{SnN352AQ|FrHe+2()fv#~ORxY=2_7}@lJ?2K&8`kag)Ln97GLqj$W4rT+8 z5g5q)L;n~20TPmbLJjWlF+Zw*$I1MRO$AYLav4*7Tac~G9kAvPm=L6)-*6E>;^$8! 
z!jIE`hk_8{(Gj&WaIgSd+S`$XEDgz(!S?p1md18E7WUR)kgc7Sj;ss$FA*KnJJiHW zu!TMt;vmD9khC!?COb!C*ZXw%pP-2Qv!5pX6$Wu%_gi@1jT*ooKqts}1^gPYKNIkO zj<5gkk3XXO|NCzL?C~E1{_(&+9{9%t|9IdZ5B%eSe@U*Vak^XJBP0LabH>`VZVO#i!sk+~J5VSte>=m($xVC-OO{-arQu#x?rXSSxs zCiVaeQ%eUsfHm0G-o(nm4zgIhHv`hVKFALI>*R;(4+9}J_nJS>{w)8k@JDAq)a`9S zhF}Yjtr@_`6k-=Z#?JiCLU}QO^1Z7x4NW0Vyt@Xi`vQo=%)xecrU3JMQ7bUO?!LzL z$AO%g0|*dvu(g7GU;`L9*xua&7sv?*QtF&1VG~70urP8 z=5_$ky)Nj!8uXhk1V#-RQ2@}dCO|*j0D=t9pKgHMQ^?=~fbW&TzbQj*(;oq8dM{&o zFJto}7u8?Vvo%Q~rG?XBtQC*vvoRypzG6mb&L(ugWXvijW z+hhFCVTB$(SV(9TT}qe}R~Uv9DjkF4RV=(hRMUE6pV+F#unjYQIuW3ioAy2%OKcy3 zfQr_~hf)|v^qo%Ac!K7#T!==%1M0rEp86hY-i~4zZ$w&n0+A0C-(DTsK2IbR<4!4z z=oHB=qs0hU`?6hW3KN}!g$q9z2aOeJevY1-IuOq<)ZPCgL54p*+wbsHCs3wcW<=V> zYn)o*TQ-uSfE50h8|Vub_fqAC19shSC)&R6v{$``>lOA;5YN;briZSZa$csg5o&&D zyt>SX30CmJEtq@NQB9Mpigx^&#?CB&Z4A!c<3;_~E>q6yl8lk$w$zB-;;}*}D;e*J zJrm2%uaB7K7sdQ7c;6X?RuHdU$2JhDrHB@gUuZI1lrsvHb2O@H&1ZX@jS;w?F)YTV zz90S`B^;u*G5Vbk?CRRW+`X69U(&U~p&1e+@r|5VLqA3a$*rB$)DRcYuFOm`A1L|4 wP{e=EeCCleWEUH?54MDV^L=b*51Fc*ZNWxp$ef%&Zgw +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Macros for printing using RTE_LOG */ +#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1 +#define FATAL_ERROR(fmt, args...) rte_exit(EXIT_FAILURE, fmt "\n", ##args) +#define PRINT_INFO(fmt, args...) RTE_LOG(INFO, APP, fmt "\n", ##args) + +/* NUMA socket to allocate mbuf pool on */ +#define SOCKET 0 + +/* Max ports than can be used (each port is associated with two lcores) */ +#define MAX_PORTS (RTE_MAX_LCORE / 2) + +/* Max size of a single packet */ +#define MAX_PACKET_SZ 2048 + +/* Number of bytes needed for each mbuf */ +#define MBUF_SZ \ + (MAX_PACKET_SZ + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) + +/* Number of mbufs in mempool that is created */ +#define NB_MBUF 8192 + +/* How many packets to attempt to read from NIC in one go */ +#define PKT_BURST_SZ 32 + +/* How many objects (mbufs) to keep in per-lcore mempool cache */ +#define MEMPOOL_CACHE_SZ PKT_BURST_SZ + +/* Number of RX ring descriptors */ +#define NB_RXD 128 + +/* Number of TX ring descriptors */ +#define NB_TXD 512 + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +/* RX ring configuration */ +static const struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = 8, /* Ring prefetch threshold */ + .hthresh = 8, /* Ring host threshold */ + .wthresh = 4, /* Ring writeback threshold */ + }, + .rx_free_thresh = 0, /* Immediately free RX descriptors */ +}; + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. 
+ */ +/* TX ring configuration */ +static const struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = 36, /* Ring prefetch threshold */ + .hthresh = 0, /* Ring host threshold */ + .wthresh = 0, /* Ring writeback threshold */ + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +/* Options for configuring ethernet port */ +static const struct rte_eth_conf port_conf = { + .rxmode = { + .header_split = 0, /* Header Split disabled */ + .hw_ip_checksum = 0, /* IP checksum offload disabled */ + .hw_vlan_filter = 0, /* VLAN filtering disabled */ + .jumbo_frame = 0, /* Jumbo Frame Support disabled */ + .hw_strip_crc = 0, /* CRC stripped by hardware */ + }, + .txmode = { + }, +}; + +/* Mempool for mbufs */ +static struct rte_mempool * pktmbuf_pool = NULL; + +/* Mask of enabled ports */ +static uint32_t ports_mask = 0; + +/* Mask of cores that read from NIC and write to tap */ +static uint32_t input_cores_mask = 0; + +/* Mask of cores that read from tap and write to NIC */ +static uint32_t output_cores_mask = 0; + +/* Array storing port_id that is associated with each lcore */ +static uint8_t port_ids[RTE_MAX_LCORE]; + +/* Structure type for recording lcore-specific stats */ +struct stats { + uint64_t rx; + uint64_t tx; + uint64_t dropped; +}; + +/* Array of lcore-specific stats */ +static struct stats lcore_stats[RTE_MAX_LCORE]; + +/* Print out statistics on packets handled */ +static void +print_stats(void) +{ + unsigned i; + + printf("\n**Exception-Path example application statistics**\n" + "======= ====== ============ ============ ===============\n" + " Lcore Port RX TX Dropped on TX\n" + "------- ------ ------------ ------------ ---------------\n"); + RTE_LCORE_FOREACH(i) { + printf("%6u %7u %13"PRIu64" %13"PRIu64" %16"PRIu64"\n", + i, (unsigned)port_ids[i], + lcore_stats[i].rx, lcore_stats[i].tx, + lcore_stats[i].dropped); + } + printf("======= ====== ============ ============ ===============\n"); +} + +/* Custom handling of signals to handle stats */ +static void +signal_handler(int signum) +{ + /* When we receive a USR1 signal, print stats */ + if (signum == SIGUSR1) { + print_stats(); + } + + /* When we receive a USR2 signal, reset stats */ + if (signum == SIGUSR2) { + memset(&lcore_stats, 0, sizeof(lcore_stats)); + printf("\n**Statistics have been reset**\n"); + return; + } +} + +/* + * Create a tap network interface, or use existing one with same name. + * If name[0]='\0' then a name is automatically assigned and returned in name. 
+ */ +static int tap_create(char *name) +{ + struct ifreq ifr; + int fd, ret; + + fd = open("/dev/net/tun", O_RDWR); + if (fd < 0) + return fd; + + memset(&ifr, 0, sizeof(ifr)); + + /* TAP device without packet information */ + ifr.ifr_flags = IFF_TAP | IFF_NO_PI; + + if (name && *name) + rte_snprintf(ifr.ifr_name, IFNAMSIZ, name); + + ret = ioctl(fd, TUNSETIFF, (void *) &ifr); + if (ret < 0) { + close(fd); + return ret; + } + + if (name) + rte_snprintf(name, IFNAMSIZ, ifr.ifr_name); + + return fd; +} + +/* Main processing loop */ +static __attribute__((noreturn)) int +main_loop(__attribute__((unused)) void *arg) +{ + const unsigned lcore_id = rte_lcore_id(); + char tap_name[IFNAMSIZ]; + int tap_fd; + + /* Create new tap interface */ + rte_snprintf(tap_name, IFNAMSIZ, "tap_dpdk_%.2u", lcore_id); + tap_fd = tap_create(tap_name); + if (tap_fd < 0) + FATAL_ERROR("Could not create tap interface \"%s\" (%d)", + tap_name, tap_fd); + + if ((1 << lcore_id) & input_cores_mask) { + PRINT_INFO("Lcore %u is reading from port %u and writing to %s", + lcore_id, (unsigned)port_ids[lcore_id], tap_name); + fflush(stdout); + /* Loop forever reading from NIC and writing to tap */ + for (;;) { + struct rte_mbuf *pkts_burst[PKT_BURST_SZ]; + unsigned i; + const unsigned nb_rx = + rte_eth_rx_burst(port_ids[lcore_id], 0, + pkts_burst, PKT_BURST_SZ); + lcore_stats[lcore_id].rx += nb_rx; + for (i = 0; likely(i < nb_rx); i++) { + struct rte_mbuf *m = pkts_burst[i]; + /* Ignore return val from write() */ + int ret = write(tap_fd, + rte_pktmbuf_mtod(m, void*), + rte_pktmbuf_data_len(m)); + rte_pktmbuf_free(m); + if (unlikely(ret < 0)) + lcore_stats[lcore_id].dropped++; + else + lcore_stats[lcore_id].tx++; + } + } + } + else if ((1 << lcore_id) & output_cores_mask) { + PRINT_INFO("Lcore %u is reading from %s and writing to port %u", + lcore_id, tap_name, (unsigned)port_ids[lcore_id]); + fflush(stdout); + /* Loop forever reading from tap and writing to NIC */ + for (;;) { + int ret; + struct rte_mbuf *m = rte_pktmbuf_alloc(pktmbuf_pool); + if (m == NULL) + continue; + + ret = read(tap_fd, m->pkt.data, MAX_PACKET_SZ); + lcore_stats[lcore_id].rx++; + if (unlikely(ret < 0)) { + FATAL_ERROR("Reading from %s interface failed", + tap_name); + } + m->pkt.nb_segs = 1; + m->pkt.next = NULL; + m->pkt.pkt_len = (uint16_t)ret; + m->pkt.data_len = (uint16_t)ret; + ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1); + if (unlikely(ret < 1)) { + rte_pktmbuf_free(m); + lcore_stats[lcore_id].dropped++; + } + else { + lcore_stats[lcore_id].tx++; + } + } + } + else { + PRINT_INFO("Lcore %u has nothing to do", lcore_id); + for (;;) + ; /* loop doing nothing */ + } + /* + * Tap file is closed automatically when program exits. Putting close() + * here will cause the compiler to give an error about unreachable code. + */ +} + +/* Display usage instructions */ +static void +print_usage(const char *prgname) +{ + PRINT_INFO("\nUsage: %s [EAL options] -- -p PORTMASK -i IN_CORES -o OUT_CORES\n" + " -p PORTMASK: hex bitmask of ports to use\n" + " -i IN_CORES: hex bitmask of cores which read from NIC\n" + " -o OUT_CORES: hex bitmask of cores which write to NIC", + prgname); +} + +/* Convert string to unsigned number. 
0 is returned if error occurs */ +static uint32_t +parse_unsigned(const char *portmask) +{ + char *end = NULL; + unsigned long num; + + num = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return 0; + + return (uint32_t)num; +} + +/* Record affinities between ports and lcores in global port_ids[] array */ +static void +setup_port_lcore_affinities(void) +{ + unsigned i; + uint8_t tx_port = 0; + uint8_t rx_port = 0; + + /* Setup port_ids[] array, and check masks were ok */ + RTE_LCORE_FOREACH(i) { + if (input_cores_mask & (1 << i)) { + /* Skip ports that are not enabled */ + while ((ports_mask & (1 << rx_port)) == 0) { + rx_port++; + if (rx_port > (sizeof(ports_mask) * 8)) + goto fail; /* not enough ports */ + } + + port_ids[i] = rx_port++; + } + else if (output_cores_mask & (1 << i)) { + /* Skip ports that are not enabled */ + while ((ports_mask & (1 << tx_port)) == 0) { + tx_port++; + if (tx_port > (sizeof(ports_mask) * 8)) + goto fail; /* not enough ports */ + } + + port_ids[i] = tx_port++; + } + } + + if (rx_port != tx_port) + goto fail; /* uneven number of cores in masks */ + + if (ports_mask & (~((1 << rx_port) - 1))) + goto fail; /* unused ports */ + + return; +fail: + FATAL_ERROR("Invalid core/port masks specified on command line"); +} + +/* Parse the arguments given in the command line of the application */ +static void +parse_args(int argc, char **argv) +{ + int opt; + const char *prgname = argv[0]; + + /* Disable printing messages within getopt() */ + opterr = 0; + + /* Parse command line */ + while ((opt = getopt(argc, argv, "i:o:p:")) != EOF) { + switch (opt) { + case 'i': + input_cores_mask = parse_unsigned(optarg); + break; + case 'o': + output_cores_mask = parse_unsigned(optarg); + break; + case 'p': + ports_mask = parse_unsigned(optarg); + break; + default: + print_usage(prgname); + FATAL_ERROR("Invalid option specified"); + } + } + + /* Check that options were parsed ok */ + if (input_cores_mask == 0) { + print_usage(prgname); + FATAL_ERROR("IN_CORES not specified correctly"); + } + if (output_cores_mask == 0) { + print_usage(prgname); + FATAL_ERROR("OUT_CORES not specified correctly"); + } + if (ports_mask == 0) { + print_usage(prgname); + FATAL_ERROR("PORTMASK not specified correctly"); + } + + setup_port_lcore_affinities(); +} + +/* Initialise a single port on an Ethernet device */ +static void +init_port(uint8_t port) +{ + struct rte_eth_link link; + int ret; + + /* Initialise device and RX/TX queues */ + PRINT_INFO("Initialising port %u ...", (unsigned)port); + fflush(stdout); + ret = rte_eth_dev_configure(port, 1, 1, &port_conf); + if (ret < 0) + FATAL_ERROR("Could not configure port%u (%d)", + (unsigned)port, ret); + + ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, SOCKET, &rx_conf, + pktmbuf_pool); + if (ret < 0) + FATAL_ERROR("Could not set up RX queue for port%u (%d)", + (unsigned)port, ret); + + ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, SOCKET, &tx_conf); + if (ret < 0) + FATAL_ERROR("Could not set up TX queue for port%u (%d)", + (unsigned)port, ret); + + ret = rte_eth_dev_start(port); + if (ret < 0) + FATAL_ERROR("Could not start port%u (%d)", (unsigned)port, ret); + + /* Everything is set up and started, print link status */ + rte_eth_link_get(port, &link); + if (link.link_status) + PRINT_INFO(" link up - %u Mbit/s - %s", + (unsigned)link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex")); + else + PRINT_INFO(" link down"); + + rte_eth_promiscuous_enable(port); +} + +/* Initialise ports/queues etc. and start main loop on each core */ +int +main(int argc, char** argv) +{ + int ret; + unsigned i,high_port; + uint8_t nb_sys_ports, port; + + /* Associate signal_handler function with USR signals */ + signal(SIGUSR1, signal_handler); + signal(SIGUSR2, signal_handler); + + /* Initialise EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + FATAL_ERROR("Could not initialise EAL (%d)", ret); + argc -= ret; + argv += ret; + + /* Parse application arguments (after the EAL ones) */ + parse_args(argc, argv); + + /* Create the mbuf pool */ + pktmbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SZ, + MEMPOOL_CACHE_SZ, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, + SOCKET, 0); + if (pktmbuf_pool == NULL) { + FATAL_ERROR("Could not initialise mbuf pool"); + return -1; + } + + /* Initialise PMD driver(s) */ +#ifdef RTE_LIBRTE_IGB_PMD + ret = rte_igb_pmd_init(); + if (ret < 0) + FATAL_ERROR("Could not initialise igb PMD (%d)", ret); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + ret = rte_ixgbe_pmd_init(); + if (ret < 0) + FATAL_ERROR("Could not initialise ixgbe PMD (%d)", ret); +#endif + + /* Scan PCI bus for recognised devices */ + ret = rte_eal_pci_probe(); + if (ret < 0) + FATAL_ERROR("Could not probe PCI (%d)", ret); + + /* Get number of ports found in scan */ + nb_sys_ports = rte_eth_dev_count(); + if (nb_sys_ports == 0) + FATAL_ERROR("No supported Ethernet devices found - check that " + "CONFIG_RTE_LIBRTE_IGB_PMD=y and/or " + "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in the config file"); + /* Find highest port set in portmask */ + for (high_port = (sizeof(ports_mask) * 8) - 1; + (high_port != 0) && !(ports_mask & (1 << high_port)); + high_port--) + ; /* empty body */ + if (high_port > nb_sys_ports) + FATAL_ERROR("Port mask requires more ports than available"); + + /* Initialise each port */ + for (port = 0; port < nb_sys_ports; port++) { + /* Skip ports that are not enabled */ + if ((ports_mask & (1 << port)) == 0) { + continue; + } + init_port(port); + } + + /* Launch per-lcore function on every lcore */ + rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(i) { + if (rte_eal_wait_lcore(i) < 0) + return -1; + } + + return 0; +} diff --git a/examples/helloworld/482249_HelloWorld_Sample_App_Guide_Rev1.1.pdf b/examples/helloworld/482249_HelloWorld_Sample_App_Guide_Rev1.1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c58e75eec382ec80057d5128062695313bfb22a4 GIT binary patch literal 48353
z;z8R2WuF^y$78^XuJ*ab9samIr@QP;(0AMm@V0%=)rR-&4{R-4ryHlmZ&v_5r8T=| z3y&sTBQT|QxzG@jEJF3x%yri}VLIeDc>P`9e=RZLfaw&BXR7c6^Npwv5JK-q3e#Lg{0#i@X^LfF2i6Y1l zz{tN6C)+mn^lzW$&d>DTV`xW`S_2PwDh#!M4Okv;CUr-##tnxX5NV1enNZBwCHpF+ zMQ7g78iu!ltD)DVIoyA&%AIwWB($6dA^)0j!2P{DahDs3emD&hig7UYo;_iIA(7Rh z<{<^D!>ehUgWa0+Yk%&_3FGL}-F~Lm*oaf^D^%jQ{%AS8^OFR%OOfGSG7Rm0Ksv}v z0e7DhCKXasv*XQd?#d+|ASl3al`PvohKl9?CRD7f|5KzG85x!6fsTAlJ zUuUE#Vp$nk0U|*u&;uoq=q+Fo0wmFc%GAH(Cjynf{fC~G47s@c0e}F~rx6kUaeb`+ z!TJ~&|937K*#EAO@!#c=I(FJ}kRCo*Ph|9^p!q1bhy*;b1tJ7DusA0guBMThG}bs7 zMs~NSGJde{#&_`QC}d1n7cxc7usM)ra=`675chegs%ktT$;sR}Y*iX^Qo#hY5QZWZq%3%&=`?^jw$ z^J_r#KCGPq^t`N2oJG4bwt|E0v>mT)Q{$2XAy;KOHR8TTl*kXgO%U11jKv9^+s#&Y z|6pBQeycdl)o$zFooF?~pI^`b%X!jb{}?&8{~&UIwducPik0nOkz@IHk?T^vmD{9; z|5p1-u%McaI1H8qFQ-Xn^p#2OCmsT0bI1y3hHdF={=7UT4k>}2Ym!i+{PcY3>^$1; z4o8qI@V}#tVBcpZPWbeQ7Dl+|#!dM4(e@=!O29ggb~-TFB2uLz9ytxZQN&xVR6H=Z zh#OT^hfrZ{g@eUzqKn3^7%*JozsFOu`fssYnY)1IJ`@g^!X+ zkB9OgpQ&zEtkO{tok;aN;u&W46rGN=WkL^SpQU)|-L`S4x^z zf#&M=UN7!NHMZ5Mck10Y^p)uwqo;Xi_32rf8cK>=k^{5=d)zuzQ&1s65y#nkW0)R| zNvOl%d;im?x$6@6cLHo-w7b9=2m9;I0$(m2kPoi z{qn*H1#NOpRF~^E>pPI-y!I7JqjqBK=||thJIwil4rf&fda8&mb%623;Pho(cu7!iKBrccoDt$oj-cSgi}7lJgqEk zX-mCx3WU?qTL5mXf?4QzYI;Wie+Q+KhJGXnBXWAaGkA)AprX0Pl7nSsgi|b%Hood( zyMGpl+bZ`SSM+bQ`U#Z)ZOoutMJjIEOoW?ij>FoA5IZZ+Uwm7t ziD=aiZju9f8)QnlKWmO8fmRycq@6f2m)UZ z<{`&%c%L$q?&FnJLVZ=r@mR)8;ntO&`r@`GIs_(P|KO65rgVJ$m4ZUEw&Qzl#(!;btpAP&RZCJ*L-gH~zZ;aBo0sR8v__%_gxd?#(q;N&O>wp$?7g6vO{=C>t1mfbD{#O{` z0>4=!#eXRdn*bu-@j?iL;c^5K@(ux;nY|Qj@-=@pFJ^3y=r-RT8;6W=osQ3!ciXj^ zWDkjh#?G(@UPaFgVoMjrRP*6Zu4-A?<+{Zny*@HL&9rxuFY=7Axnc)Fa%d)R5!I zD?_6sSEp}Q5OkfZwtBrh)|)?v*?{G}3%8XMx_+rDI=GT}8q2yw<#FbC1R)M{GObjY zc9=_T*atF&rz*FX`L%}Jusxe@n;kS(UT!W%Suc&&6&G6mfBD`sK;L8Z||be%fzz5Rw%=9x0Dx0es<%e3>>}2;x|J65Y^c!NqUaeB655 zRLft4SJGWmem5x{XYR=HhBKh|7n_v?+3P=HW2gVW!}dQK8tN$?*dTi7(0^gG__3$T zr!F=Sk!h&_5?qHD*b6vZE$`}Hc^uyvfcennxzvnnnw7E`T@P@;ndAa~?l6~TR!U;9 zK(!vQ0jQ2P+z#il={!MdOqbdH2Z|oRI*9E3AE7b+2hbSk|F;okVf$Z|BGbRydzzH{Z8zzmk4w+i zuv5hwsI~YN^29q_)aijW!m?wQ%wdRa^*cpc<2J}_y$q2@U6Y=ggajo~cfE*3&e>5F z&(}9Q*q(jw6NyBw~y{<7|c^}U0eaeuuBrj$8CKX_ssGHcMsY}||n(5@|zRMunSOCq5 zLwE{-Y{9$hJih%2&cl1F-P;}f1DXs|uoCSIGy4fronGEI?zki&9|ruaE)l_b$ttwe ztAy@GT`rQ+Yj5Od z4U_QpDNNXlIg{#~ONqf)0%0+`kItm9X?992liFgVgMz{KKG;OnB>>^ZVFCSM!~nMyZFa#@E} z^XO<3*vpY$T_H<}NvqkKOs>!PZL!%J+gYj_%Oa1o|4AQ%iT%HK zOh%@EDdWEv#`wv<)*4yZ&*X={15N`Xm_NxSKlmmPtpueQi)xLw$xn|&&3Jy{AVd|& zb1DM^6%Ou~NPr+W0iivxhqERvFaxJ67p0d>!5pTXr4(L3aNy;fZT}F#g9l{wLKY`(JMVm(yloWv2g^_R!P+JEBje zPsif0z=!A+r=3p^9alWWA)7FfgEI&sP~lJff^Y)+qVuMqtA41Cwu&kd z=37gADJJVtNu{kL)A2m1G&!GW-D?$bl9Y1mnc9WgO-$2$XKWiRT}Xw^W$c|(Yvu%v zPK5@NT%_M?LZlUkA(g1}gay`oJp-yG5=9&W0Zi1V4YdwvhfnmkJU8gmhL8k4(JX+Q zY7T)q5!DuqZPGxeZ^ZCk9AE+A9E*fcxrkD~XCH!NGkUn7j_v|u>CTXNG zG9KC*^k3IPryos)ZwX7$XHA!)U#!EN0(FXM)-SI@J&MPytm;9Zib)P1#MY9)K2gOu zrgvMB#6FST6BGpp3lP@N_QBUhmD{|FtcS5f-@NN_uWFA6huc;JE}R2{OjDN>yq9h% zLI?FVD0DaFg8a~N!l!Svqt{=-95iSFuNtuKpHg*=>Qjenof|H3i`z#reWllbK?R*6 zNkyckGrk;-I^FI4ygtO5(bV*f9&GU;=C#x78T!7v8SLca>+yJzTrt}n$MHkz6#e{o zd57!uefvIu$d7h^p{ylvVRC_~&$6%@+m_Z7vQOUyMe&n#{{~OdUJ)NaSjejcGWLlX z!X}8*^$E_aJqK4InV=LaD?UpBBS*4aMlc-V6y8NTxP<2sF^dt#%SY7~dg>N~A3b}w z;P>$l)}?`wCt7li&MB1Z*p4=XRcW8=?h;KIj$52Oq#Zm5rwhuysao^a$F zoimZO(gqwWKg~aSRxaKm`rIkF;t9wpI-B5(kChL51e6@q6Dm}CaJtaQ4-g zt1nFn|EJRtLG35ik-+Nr{k{I>&zbw3i=TB@Pm&kta(6=E{7a~4;Vn#O=}<}c{(OA5 zLP5f-Xm@Gk<&ODl%gt<kMMXW@RzI3q5CV%Uv$4Nb;Z3#sem_m~5>J03?I*ewy5;=*_YcD55_9D|<+ z>G9Xxa^rxi9MoojMn;>tXIHS;m;~OdO<2w3bih5H7n_d~chKw%sWOMd3E5aUlhdLH zsS%g8+BrKJ&kI1<}b;vcL?` 
z!nFrjdve%Zh=#)u>h}n2{8w#!$GXuaSi^=46*pb=MrvCL;)aRpMG}e9yaa+A_>zR6 z!*>985%^5D&tSKoQBo??od-9uB-&xMa zAKu^ZzYFbsO%7geav8Y2{d_+@KfgacrNvmI+oh9i)xkXF!-!~Vz5B7-!oSF3ukutO zv=7EooYhH$iTzF*_0dQ#rs-V6is_zT!^-ZS-}JIPQoO{_HQToqaE-@8ui@e}RjcJ< zn@)YK&(jvM~X<25G=4pRgbS+mFc==#a=l00@41MY0)WP=TCjAG= zS(hme3;8LQn?;q3uA9Y**KPN-a z_BZ?gw%f|c$fis$PtV9EPtWwf+QO35vM!UeRP%9?)Z$L#Q&Mu0)D$bPQd2S@k!{51 zU}>Z&%BV#bVE_<9^yP4hwq{{ft3)iN)Xl_9916QEbR2vj=8$usM0q>{@Jaj+Z8E>W z=>fc+=w<#1%Kun;{ddoknT6y39K}s(O*$TqrEFhPIhyqm79PhDvYdm^$ezmx&aFx?y=7C|7 zLUqA-K}4tC^*C~EMJ`UJ0<>Z!dpaZ&bP!>zY#?vXDL{3AA7G%AxbzCo{h~0lv6#md= z|E84Eu-RKy0O_ad$SsG6I~Ik!_Xv(90H3c{WTEWFs3w+8h^Fn3>}I*Jd>-Eg`DCPp zkWwE-*)Bs;KLzBP|ob60b8sBfJs(7*=J;VR9sGkR33k;=YLEM!e|ZV zrhz#;hwc$2r4b;~7n7e^6ugJ{J4Bn)f;<2k_9bBeRg$~r>kx!l)qk!K z?T955gg-Q%2fjoihRC}hl+*DG*#M+pAj+h~k-J}Qh^B`S=uK96`S>0Re}btW4TvTL zkeBO0nPeSe1TqWp0ffK^i6XZAky~yGCC(do1T2c0V5y3`xI78+*E7mR!yQy#b@)PR zGRP8$VPT-}AA^j3sb$QIk3-54N*-$-9aDu8Sz;(A(Ot!zF-=iJ($` z-=c8#8H2`f*}7~(t_AL6i~#C8FlG9x%mb-2tB&*>le*#wIwuua88`+gTTOERbBrO( zs4h+v5rs%8?V~rOL*;7QY!YLt0E6{kgdkf>bmiWbSb=35DB(!6EhO=M-&E{Q8Hk$N z@23=XfuK4|3ZQ5JzU*bZgKtX3$(2E6_=4ziurf>W;?jrpDCyPrV{6F8_P^2v1SJdD zQJ6>-p6Bg6rq9Po!vwZY?uA)^ZFf-;6+KzyQ~3UgQBsg+g496aZc1&w8}!zsb7 z(ExNmPti?_F*9AtEG0T#^4 zO5^B!SD_J}6-kAHSBZIG`R{fWe{)Uf!@z4?=f3MjG&cy^7Wj<0#;Xhyy8MKwq60bi zPdD4Fm;n_+1<-AB*%0DmSHuhuaiH0WTHA3*MppFD@)0rtmf)meDaWAsJLnoC$B3#3#za^(0C}sb4PgE>aEQQ~S zFBgU~1@u&i*3O&dMjVv=P9uKN^DPcCz2BEoz|EOFIOA3$TvO>3$LLWEW}MzpG#?{+ zJymXc0D32B-c>%jR`8*A=0lU1Osr9~1PP-M`vAx@ed?l?IF=?=%_aaQmvc8&03w>c z@{sJ`VDew67>nl~8Cz~IQ$)j%*+S*Om_nK-t;QGe0MbA%@C+?4yQ23w(C~)7BxV>~ zEY;?}WNN45Dn4&o?pOmDtWkU@fLe-Tk8;Ob^o$=>+5VuOsBL(W(#Fn7BIemhIXd5$ z(!kDXzPazDd{A*VRI$q0kgVs2Z62+ig^qQxhAPai?{7>PPX&_0B>eGy@$i1J;1(5i zJ{-oV7#74Unk&a*(&5P_z!u6iI{9k$Lfo0sHF=L z{6Ri{$3X;406iHB9UJb44g1eCPz9$+^F=*^Qh&969S{p7=W6a1>5VOmM%Wl@|JL&< zfo0O(>N}+|0+O{LYDxcPRFB@JeQC`!$U`A=gsA%B?ru;euI|n%CFQA%pz?D&0!30&HSV<3OxN309!&5qh>N05uhK9YTqiTgYxvdB3+)nE<*XU%V5h<%~v zjEa?4mQ2L8(LD8-3>+R?_C!TYvg*Kmt);>_8;Sep0Vs3wwJJ1xziusASG$Ikb!P6<)eIk2D_8X^R4YGJ>1i2IoM*c#N{K==OJI22Q)nWXbYG^NppO2 zKoG($HF42kgfTS(kPiS}+51-tW0q`SqzM?%i>F7%=59EFBHCda_i55CN?io_yJ0NyO`k%u;T!~Z1%9-S(RJU6NxvEOwg_eM9 zdbYQyV&7fEStK*@AZ1gWW(|28}*gvGojd?c)V?Pv{H*@nT%L+XyRw%fO2u zZ-kJ80bB(7k}=Ob&SJzcnF-03I>zxD<2ItEW4;W|KVl**6+j z;SjkyA@3vy2%p3OBqId%-pY z5w+pCujoOc7-Rxb8Q#`k5J>2EV#XzHJonQ{c2@toEA{=e>T;mbM`AWoc`d>Ky1CP|0ME^c69C-jPmG3T-V3y( z1f0(Kz?Z-wV*rIPzS2mS$qN{5Dz}J*a~FbtH>rz~fVQD-r{ zuYF=egqKU!!3|Ix)P&-%k?C+52U<+uEnatf$wP`3#07MIFIZ*@;8w6!)}jvpNxcXZsP1-}wBveKl+rQM`$yky6`E3Ho;|~~C))PjFzVRZ@b6_Q2tQV*%j8nRKixHhi zb0kbVirGR7M!Cx=qWbsaJ9J1nrN?o!1H?|v7(+OCQVW61r~hwCcCl8S4F^X0PJ)hTmm+TzmS z73)8*78ZHl#97s?p~1%0(Y35UBwcAivz!b=y^GOConRE!d04{Uc3Un<*I5|Q8273< zauKTV_k$3Hze9P9iezm~`gjKSF9Au9@r!agbcKLtJm{A-zmEe1b4Qts$G(f#MaZHh zNMK}bDb!VBElLt|$s!K_msrRt*>zGbD9SUIn=qq@PE^akm*!bJH#fDx9O59&LgyJx zD&uNzTN-GgB3I-pa=fjvv^HsWwT%wdE@3WYQ$vuDTZQ!{e#h_r=9M`%gJK*xY;Opb z1&cY8d9Y)M1fOd3VEElmqeFf3ONoQh>va9g0VnUm?U+tS<5ZFdkc=-d-OaPZk`U)F5k{tN;ra8S2ks;ZU|KUA%blT7!E5Z;nfgEYHa`n0vMU%`5= zR;V4%Wh?Z^@4|2ZJIpZj#n)`1{C>~@3c7vbC6yNi5RQixIN6~+ZQ3{$!MwoF+qt}IUFmARrvnaI4EOw(BEyH?0<8#w-a%hmQ4e^ zVvR1gg0xWL1$@9BKfo*51H}e>_7gjc)g?E1>4e^OX;S5gHa$oQIu)PzNKT$Gx(-jx!aF%NU zwvq{N>=1iG4DqpaGtPN~0FOlT^-qAA0K$%PjBAvf+Rh5hn~mN`*%QyKF;($elimIN z@6Xn;^zhq(Hd~G!Gr4pOElj&Yp?JO~@Cdi{rWv?2+6BZJAyqsokvql&*`w`RUxaw4 z#?JFc<7(Gu=3UL{Zih{^Pu^v~DMy1x#+6Sg6BnZQO;|e@JkF2|m7UsY(@;)rff?v6 z*9~z5BYa&Amp|w^92TAjMc4LB&x6wu1OP~cXKt;F?lX`*u(~Idc1)3MMca?2 zc3In=jg75~9>&=syEC?Fa)QXlE7seSy1A`fW_A*8sP5uNbTC(E7u%uqvTW-RtzNAD 
zwguLN?|3OCHocR02H;t7Vv3+o%`+;cP61kV3gh4ONRZ8zh55&){l=Clo3=aD&D0w= zsf*QiCs#e>T*1(Zd$nusE>y(UM>!S-^BE5(>U-NXCuaZ{zAubq=0y`>6#0A=o8vEx z#(ZP9n>vM9rJk1M_N(z#Pjg5X30ZpTVueYWA=o!_MTCF!m=YS5@M0Vxy;J4RsSxv+;OLDkzMyB^(VYj zetg=ZnX9CEAzN60IUyG8WN2Uyk4EDUkcf&SWfa(v4OK0Rk zNnSL zN=F8>9p*(hf2W!G))Uc9lQ|2G(x#T~xo?Y1`=0HaQG;*&vbt;c{7oC8Xf&%(?Ob4} zycc7SE+x44UCNlJPaWjF>c@C}*c3*jZq59CsY^ZhUI-gK6tllE+hU^;8^gM~dG`ip z?ojXgQ@Qub8&+O_29!1Ao-BT64U?b`l% zQ{3Bye)}y~4E|Vl;LO-UDvK$_MwHvLIh`mcN|x!z{v&?kiw^s%nn6cZQQXNng2}Fl zw}B4}VytCNv+F}d{@`#kvvr5D{b?8W^!UfCz-V&&DE?Ur#cmqHc#=K)XIq9wd#(K2 zmDZ<47FT1`y&(8XW6a>#*-T_$MmGvy5}32`@`*hD$28R{Xf{G~VMdM}%{TeI0J-M| z?>pJ$abJ+B2;sLvM<2xa@ZW4*H1@6c@ii&nv}6zgi1NV|Iy$8Mj+sbYR3 z5)Db0HTwC%veF09Ue?Lp!L<{a!dE3E0@}hpGS?k#hAI*vvIEpmHx>e|5Mh?}j{5NB zvFl>u257L90pz2%+x*=sm3R(i0&0E%;kGvr_9QI4Yl3vES>&@()S!St_@3vMnN%oZ6 z>vCHI-cWwCPg2bL)XSuK_Pbirmcg;+6xI7JhGne_6RBR+`xI0FLr^Wfj>D}&94+?| z?gBNWN#hP}zV#ENbx63{htVCJPbra>s)ez*Uw$r6gY~AQI?umNR}&T{(ZDztsAcV{ z;y#{tPL?gF)hQV;PQ@vA&XjEphz>HUSU(i@=?wTBI*>Fp= z0QKk(16U|(l`5=Prf7}xB!qLPJPoUO;T_T?OVpY<5VmUE-rT2S2!TZ{5Aa{3yqoz< zgyPk)h&FoHV)+}zX}(=!H3SgvN&v!?5%CK6Wayz@+;cp~$2VA<7l^hk`VdnWs(^B%>o z;(+nS)u}q3FiYnM_hM+E=3A?VCx;+_3>Gdit+$;me7ux)VPR72=|SM@I`G=Efbq;uVO4x zr*S=nFE{N66}0sWnj{Oy9|uoY{k#qhIEF9Cc{{P5{tX6u&2Cqm&%CHtyt@ih;u;m@ zEFo3*6|~l)xixgtA5ZBZ4uXKH_-WfW8R^1X*kC#qyB%KaWX|}Iul8(7)(U^2)~yYz zPz4Kj@Y(X0KpZV^jO8;}w#*3^7FkCsVBD|N#QyhCQzAZ|x-Dz9#qfj6Y8D*U{)|7Z zF&HY3znQH!Awp4vdb0>N#@d$dx1;P!C1ARn;gn8q?za((v+PKcHLSgSY=TZKaP6_y zS3=iYi%W9f-yOJ)?G~u?Z2j7fWr0>r{e?%2G$o2QZsNz9zj}s7Z!+E^Uyq3ID1dMa z+eaDGOh>A{9t3`jFw+ApN~OUejrQCgr|cvf&(6pSLVAsx5R-7w;UzkoI5Wqki~g+M zHr6GwX3RRh4kpRY+8Q^7g7 zuHq5i03s={)DC(6@(lB?Z<_tO7NW6LZ+X=5ebweUW9D1PJ&qOp;j>g%VO3ya;NQqC z%H3ves(fK5QV4PlnISxT5j9oE;*Z8k>n&qcA?XwP&Ll$}*L=Xxe48YZE|14kRV+OcLX=yp+4?K+dz! 
z<2o8Mnkum>6e+F%u5A7zfOBUUG&{_4J>KQRSw@Fl2|MBOh03DlUvUB_#ITbmHShWJ zh9sdJ44gT0aZEjbg4S3ihpdYl2+c7kL&fOtj@B8%hZXdtH_bzu7~05YY#uTgIcCyUD>jyBaH%E&QfKjf z`O|PgnWV>Vb5OCs7j~bIVYh&{a}G{`k787X=qa}hpl9U4B1RkKeim6If_ei?gY76D zrvP`_=I!e5VoDh_OsA7XCsGQW4%?!J58Tci0_kT|i8xDOzUfE8MX875uL7+D)dL94 z`sQ_83Ilk9?+WF!U`T}%1|!q;DxV>G15nJPi0P6;dn%)2A5mpEa{mF$>@o_$^c=@p zD>J0TaLZp99&0QJ%3_GJUJ34aEipaRNR>zCK-gEU>ZF&{SdniCnS%xbYL9SMP3;2g z)31h3{!ptD%1Lz}d(RY#_2iQ2HqLe?iAgWS4!o?V4NzSt__N4QThTWFb1+T>C$PW2 z8*LvQp0hVoM;0cT9`Y@KB1o0=32mZffEpvjTpaKWf&@R>1f$ThUZ1RgmhK#E`$mO5X~eh|UpNFV#u)g&Jn1fL+gki>!$=INKFfsjFi zv-qzwu}DYu2Em3u2lD00aFo#B!FwAnSpRF;Vm} zL6SQ37i$0}D|&*ZkXR})1fQBGP9>cZ$ymzJs}9zQy>BPm5LYXSaOd1Gs{{FkZYs5jBOF5u{Y;I{CUtFgc?PyI>h+G84CR`#0Xp<>9!~yvr z<-SS44qYblR}?N-4>&ZB3MB?gc77hmXdjjmP_2B92wJ8qo=R1EY?%|Z{4q_cHDpi; zf|bGpiwGR-<1e>-jIW~+1RT;0bT8IPcdnQr`uXB_3{#Q~HTjOd3!CFe)HedS4=9;u zhRX@%&2m=gO?z&W4J8VMT^7Gg7CBQU>_|h`)jSjE-|5on(I+x*pJ0@ka$T_yP<=sy_aW_F@K76mi(OLhs5a5L$k7A!H1S3IEhub{ zi`w3uId*9WrtACWP}iTwo~^siK9*^lNpBsG3oVthBQM0B^WQcvr=o}Sc}D>kSc#R7 z=1(@CC=&J84TByVHLG0r-#2x>G`ceqMa+o>k6y7T)I z*9l9o?OMv(Sv$Pjc&b*+5gT_iC^*weBH=v-9WmG|+9k`>aPpnMp6%Bu#DH~d=lU%<>6eFN z#l>hj(QWUoTf4QoLpuQIY&vyYOP9NdOnJ)6=7kZ0ZqSFxQ1UGyainDXm*h)c0H=H5 zmh9s$Fd7S*g+3LB^+GWnSvW<$M`uAWZNU-4@HPdJKIcR5;}=*KdWKD%y&hRE-wj!} zNR3K1jl|7g9p(Y>or|T1yq`olvinrPJx$SOumot@H5F)erOQ3t)3eSAl&HKn7K~G; zm-LGYN5Hhc-gHv^W03IupM#w_6JHsjW`gAHEE<&{=0MQbK*Ka3vtB?s4mXbLqsC^3 z0ky5wJ{inP(#MuOwH#m?gaJgpRB{6o2OsO>^g4h1hN4Gn#c;U`Or)e$V&H#`1N7wzqJ;rRp!X!VHy1`HP4^vp@YS_&1HIk5h4Nlrv(KA1ncdWk zX9_$90s6AtR2Xa`ahBtMJ?|;ZIcdn}7(^*8eg~-dQ(F)hvbYpobUF`!|-|-!aq{6*h zGpAFl_J@*TE^ANJ$ma8Hv{vU&#L$=PrB^lKmNxz2tDZAayDjfFl30$ zpClc}zxGQFT&S>ORdcNVoI26=q>bZsX2DG$aZ6#SmUC!7Au2)@myOMG%Ee0?TUAGt z!}DxG^tC64H`D!tlY)gu>S)7$JNV1QSG~(b=IN#oNVY`ef&CC!GCLG>oX(mVg$W4x zZ+mLhoP|kHUqjaidw4sR1be6+-mMgm%X4*m=A32LfC3h$y6SPY31oLB?pVT3TZ#g- z-|if#`@rjsW!mP`v7L|R+yr7Bs**YsKiRIeF5dg0>yR{(l7U&NS9@2uI!&H3)LsN! 
z_yakExJGP&K=qu+Bm(#PJSn>{E+k;&Y{;#zICh~G%#GEX^#o^hkDPTuEt_dv#Y%BW zwk>ZK`F9+c7A_vV2}a{b>jJ7{LM{RpC~&q-%;Nq zJQ*4wfmf=tS2cqShA8r56O-df8p|t|o$fym)vB8Y2`CX%D!M2VmdPW2B00*%7g@vC z2Zw@{s_!Hs`)}Wlns;e>6^Dc>A|{DT721oP6L(l$?myS@m7enMr8WjChEZE6l;e#J z%$8+wMS%n;yp+|7UvfgeIY%lrklAD=s^uSjbg)syt`RKlnwDQ>#pQ@e0>%j@mKOI@ z0&LmJ9m>y!7BYs9iWX%wR+G|HM)we^S={5!HFrv=U7XzHNr-%2Kio{Aykm|~_ZopA z^5I2v)iM=J<&+YS(XR^gG%fy@6hEP*8FjrP`r+ zDex6C`PkM?x;$1W7zP^OcL1bZQPXwUD;!zo!qL`?AhY^}N0z}O$o9YcTkoVaHB6)x z4Y|bWV|{W`GEpsp;RGffS;uD=ffv7jePXRu-%}{Jpr}$Lky=FX&{dTK4&a@<<*Aou zP-4-wfJEgdfX0zxZ1(^Z&~`8VOlZdJ7Uo%1`9#*{TmXgd6S;#i9L` zwhcNr_*_yGl_@;^!l`>-%*{k5eY1(eg|;XhDQZSzwBOMpMfD}=W*PV8?2`!oaN*Q@ z%4*sd$(lR-jvC_)RnZ8g)o@Nu3%;F_x+K}z6(iCsVHtYqz zP32>mHs$Ie7J7vl(Nc@f8KbSPAaRTpNnk1bzZ3EpLEoy)B^w(n_XPdPgor8Kg^}F9 zphr~E5!pykB>>RT7l`LG2bBVf6#=r6>El6g>)f;Je{ruHC7%8Wjdl?foM!2sT7Zla zDytM+b13GGMZN%3`md$J>xR^WT919ugOovzYGM7F3-U0md_<0x%CsQ2f+wvr$`}X7 zoz0-P7%8V%Y~-K7h+KRQsvUr@GAl<9)Q!nTf`4e;JCeno14vUbkp9EC`T*S=ao0bY zuUfu-`4m+K4>lHzs^(iC-Ho16=dGI~8{&T?zb2m1)#Mv$BcWid#vn6;IS>6*bOfa= z-VorIH~j>D`MWAa&~`=KwLV1%<5u~i}HTqnO>{^v@ne5 zp)QlIU}MANE_%iQ(Yy-yBaMQ3y#7=U?W7@x#FRnIB&EcYM2+K3CtXQPX z%W#4{xP=bKjbTEqIb!USfXZ8_ehlwvHG){D({T``U7ry7&cvfc-WSX8#3+E4?ktO@*!2 zXwJv^F|suXLR4D)WY5o#0UIU~IcOP10s!%pGL9iq0XRnC4tKdtxU|Hv%xdqk6uE}_ zd7a%#y_U)oqm~Nb%ta^8>e;iGccd&yq0lxHT}S1qe0_AuS42me#>Lq!{!}a@_CkfL zg2bvqU4$7aWG0-uTfuZ=V&eZ{?<=D!-IcU)cWoMXXx!c1-KBANHtz1)H16*1?sVhs z(73xhAAQc1bMJT8%>0<2!(y>QQg5a5?8>`SNu^4)D@X)cLICG3<4J1a8iRJZxaqiW zty6*S?Cj1O7jw$ps*O0J00Z@bBFwY_6gv!{*i{I81>Kv+hbU8}o4G90r<>i$z;|q5 zH3^=pX)JQzje6KSb`n<>rVtH6{<*OO;LjQt0r}%7y7bFen<_JOf;dD}vKPfVj^3eI z+X#0iKhRYA((j{M&sXfNqQm^ll+}M;*=NqS5QGe<3Y6~yp$s8B7t6oRDozPzgu4?L=5aAK!bt+vVLk zEwDi=eBEJdP4DeH zQWku`7tj&?(So|qCN$8z{ryoJRW??^RQH&Q>mad0CPBT=QSNKomhhtdq9R$LLDIq^ z`Y8gmg4}gvz0pE-IA^J?9oClY8;!?^I4`Ejv(Kn2oatw0!Z-x51EO!v+fk-LmDA3- zw6-hOb$D`KOx!}rh^J7ENl(F$O{bBn75Kd3k4&SKnX#Ha}v5L-`HqtMK7z z^BHs%_*NY* zDNXLk*_-4g1yg#KO0ecF>Rev#H%5$BjmK(2^^}z9dr>$a(lKiaB5-%9pM4QT4r6yh zE?n!dNAuDiUe{C&m*u?mE5tZZ9!+D#vYhjUehG zR*jimaeU9R5!Wp8Z7MHY4 zI;Fi4>1%sPA+kt-Bl8D)C{TDs(K_mjz3v3(H=7pob;;1QybZZ?eR`2Hmlbyr!@&M^ ziuzvSGL<>6H_-N}0+{b5kT@VpGxxlcOR)jw;AbW1ChhCj+INh_~A`x)JA2zhT zL{(qyfS`h7nV~GrFii7lCyn8rEZEP+#|RnSgpi5dycWDFCK|fo*oV>g$cu+lzfju2 zOq5}%9NLXeE0q!AksZ(BFKF{Imf&v$JD-bz8DBz`G{wor%`0S{aB;d{aq@a3vU_;z z-E64iYZLh-wtneb5QCB=aqVjw3^Yc~2}EjdBi;}_Zz4a$IM`Xk>v=0SJgPhvg9(0r z*kZp_p@4Zg#K^Z(%YH$%4R@T%c~v_ED|~=*au=LK)z7C-q?koA{_(jGT-GronR2ME z9-2C07m>D(WJYn?a17f_!T&wP5-uhfXN}tzKA=Uzjm&N_KOp#{S zx$O2MR5#60p9zQpFd@fqEVV$q{JfKl=+!`Dmkh~X!z zgZLj!SmwE9Vh8)Fry(_bbF23b22R}oIs4T+hS$frzDK+4ej;WAj(}=1SHt^Fe3XrA zny{wDU(6B+T^CCyBx%N>vYecy*Fn>_Si;vFCnY~Tq!Doh@f9}Qsis}OnU-oSaOwHA z!IELV$1TuZXddFt&iy32KWOOYMB^}e_Ugd%w8xqSOvf#RI`c4m(xFqq%=GBa^uW<^ zpnX>fpUBFo#SmW8nn*Hi&dJI1Vzxvi{T2fPWx~Z-o*=ld=VMV_B%51~*8<^bW2G|EL&b^sw^!wR>M;+Phf_X}qBqaFav@jK*QgMjLQOikG`U^& z!>>09p`CWjStYrpY-Q)(o*tl4wFh%mS-&cV)~;(j)?>zE`u0A;0!jL;0i(Qqp2NP4 z@cCAy`^x2W&s|?O;Ud#q;R z#RQBWd{T=XnBJ{F?&IFBOW9W++G~8rrW;Y(7S#O@$Cus4%C5boNZa&CXUxnDd2=3y z&RZ9$u}2G#UPfkC5a(LMqwDv=ZBTo8 zK-LJPeI%1Fd9TbXw{LgLZP7z=cArPDIjq7J(cjqnK$CL295A2U%zFS~h(UXb->$8% zL7`_olRAejkELp!z%IJg(AT1ssv?PrTe&`t!f~S@y;DVg0cvj zhb;t#|2lZY-}Vrgm|6ejnooj;mgCAGy7zw7;$c0L?~t~kt$MdPzVeF1YH`b=^Pa1@t6~_9)B?meckMO$OZjDp}2ILpA zShj)8o%Omurd7UUw28<`nkjCX<_P<}C)fqWldij~3=hv4)NEp=Eop0*11R@k@C_<0 zOK{cQ_xo2jQi5zG=Sxu{mR8Nohb1ZKtQWO7Nb7r$Pv#2%rDzYu+~=~0Uo>#(TL6*4UKfv6-IIa)KU^^v-xTYvF_igH`CoHJ5XwSQAeyW>m*xS$!aM& 
zx>>8>GaRZ_g#@AD^9OSVsN;3rkF#S+H#x`&Fje^_LV=}5qBTYxsaP*T=n0idwUjT8iDJYS;0fP`9xZ6BWSBG=ME&IlcdZF9+M8V{ zBAC>Iq^p($JpRe0zAt!;aZ;kAaCu-ZB;pTMaIo%>3m&k~I@?ec6oTORd>Cd_4?x=O z@X_2T>ZKANjx1fVDyN9}{PA_v@&dGOpurgJrS&{0<{kSw>%C)(BAis=t&W@~+X9J5 z;pG>0RO7gmDxVUOYJ+k$$Ur=);;|&IAV46jjU`Ha1T}ZeyM=VJ6n`j~-f6{1P+5x; z7gB_5xTlv@(^^ElR82b|V?lhWA&=okgMoaX_sxTJm-ImLa3~JctVYZQsz_ZeqFe!~ zN!G|Nk<5pkBD~14AXFIX)?*M-rMgp7h@;{rA6VT4-7>~SS+QW00!54SmFig_iO-eW z3??oO^LI#Ji3>zR^MyG)fwxyu`G~!U&Iy*A_^})CC_Ys!TEZltlzUg{56hyVN;JaC zB(f2P7FkjAapVL)4MabgAEq4s`^ZUSLelI)kd)r_R2`U%6Ui70QL_@q?>1+0M!JDT z#X*_DsEP4eFGqHs1j0P%h!ygkvjRO<2tl0Wlk3Pdi~a!~jZ6o*g0ln%xEBji5Q;`i zwf~G%WvZ~fJA}hT>~yf8)6+(PI5T*X`mAZmm_E9oriZMZIWUxKg{(5?fnKa9uw@eHL?HX1VG@Yb(C#2cvu~_S=JJ8AYx*(|`7Emvgu*>S*16v= z(^Z3rIhI3ul@qA3d3XJRf#g3aY-?-JAcGedvS}8uxg_MNp8iVk?z-FUam!t-Srh(L zss}MgU2m7k>MHS!3(mO+`hLi1{bdcnOg&Pa1;vwsMj@ooqkl!b3)HMB$y)}WRTF6u z@de4MHEW!(C6Rv>;k$fgQc=sA#M+<{!>91q_!){7mw z=9h6!rpHfyJvekB^vNx&#OWC42&!jT;F|;6#hyVUEicPNrwN_U zI>d%jm#ts%=!9&jJAZ!i*lpUC%ha9P+K5SMHDBGCozUcqFep*PDo#bvpsxMFw(Xk~6P=WPyvQ>+F;k zBc)sOCOMHIQDnh4@kE&WShKQ$vF!lsf&vWILTI86=3h^%WIVlymwAiKiQBXgSG)uI zJw7u(D}oPrI-tsYVo$%#07NO!R#e}t$P$K?{CMv zLrp~GLLg25B+a_BlK=-eA69+D`4%&V!V^0}C={vAuW%Jkb{>4*%VblmpmcT65c?03jio8#<$3>CXw|Yk2}L_zn!Z(%Hu^HE z%jB9gem*)!b9lPy3Zb3H!rf_X-e_t0Pvmxl%YhLYT@xU+;zF&H@;Y6sXK0+rK6xMJ zGpzNCfv=RFqA_~>(G*H2-%St~Gt8by`;JBU{6u2u?B-NiT&2BPrf%w*xW14(1y>Vr z3L%KiPkurbZ6wItO4oxb$jKRA-v`i$$)g7;Pby^Tn9z-HRgP#|5!+}GVtpxnC~tS} zeBg1bK%loaltc~x%FmJY>K-t7i)DUN&L?3wN`j16!>!YuQ$|w&18%xN>tAIH5uP=E zMu3Ii9kyuw5sUTP0n+@bBz5D6>iV@HKLc;>>rF%O8~5}6PvCO<@9o*KPNXWvuE50; z4A>iB<-mRXR)dYg6Gw0lp7H95QeK!W7ynd zY~_0bY2QzfB6i%AD$TVN2c*GX_xx(HOz(KPLxB{Xf;-2E8Q5yY>dzA+csRwy5#*-D z8(4n9euQL;Zgs%d;_3mgMeY+?>d_Mj0@~PNA)^X{bkl|HN^3y1HNysW43iMk^P~0F;`7QRM^)pYqiaT?xNnl zKp{zxgK2%H0Wq9F#>Q`KlTHiar!a<1e-=?DH=Q* z`yR?G)U|Q;*S1z2io|QL1~8M76)d|E-F;@S$W5a8o!OWWafJz9;x+2Zq~Nzfd&-+W z-502Z;EFBJ8r=RPEwVK6Sk^a-Mw7$o9qojOlTXF1 zF&1AsSfnWQ3XB+W9Nra<&9ar$2r+cd0#3JMa&<8q@@)qi-hhrYQmB~QBP+@ALA?gP z`kZs{z&&|k1x}sf%2e}m1>Sm0^$W=MjU3a@I*+YX@;HoMe)^-5sAw^YeK4|z=(#09>0XbP~=nK}QAxOV2ex*+On>?eB7{~jFRCUxOpcR8oR6y5i9 zN>e(9-W9ZBPt_@s_;t^W)Me#XQC{x7V+}{z`PfBSNl<-Zgzf-ryof)*-t5lL=JjRFQ&;qc-O0?cip zP3fG&(c@m@V_%V3a~q2eT(3eM3Ehv}CFwJR*m;&WI~JYBbox78Z;bGR)G+BP$Fscd zj|S$F9vtsZj>{JIO+D}M>1gXZ?gwmhgn77w6}YgM#}WnCFU*17IWtT%@FHw|xcS8R z@5R@|oV;Vs1>g=bHJ?s9xl<^=Z6@F8?G-LXog#JKK$%3ZSTo*IXw?$S#i`~)fORw{ zzy1z(_+%BL_MdFJ|2mx3-#rKESy}&jE*z+?^=(BM)oY4&6|BcqMjqQvy>eSozkVFzMeVR$D3tWCO-tV&p3U&&nO#=8-Q8l-tn;w)N+t>a zQ20X3{5DA7h%%JZV!6(yX*Ez!B06-gcI}_Vhx{xWmK>$53MO-2yUFHydGct8^qSYr zWu1pUJ-Cq@+f!uJZ`ef+6->~MsY&rNRgG`-LqPbq%is-{ca5d$vV^6-y7^1!_n*_Q z$&u?3-v^i=tu#$CgnyNn$FY*+0JoL@;W7wb_J_-07Mh!rSFBO*#E+E@hWZ{z*lS$_ zLZG!zU>e3DXL2eh=B1xXJ9U>Zo@@j)cufZ1geEMSVs-HH&LKTiYi`|2nvWyu-2c${W=9S zS@xw!B}H};)!kmqPKFCAGIFZnLRl!qDNmM56)#Jc#f4K@e7DClmDvTJ$(bGNjbw_x z++V=JiTs?-0pGxf*EEX#g^{06Qz8f>fEOcaWF(C0Mh4$w63U#GFM{d|5gaRC(ZY?T zBSGbt^ENnDcYFee6}w+=__ZrFr9ygI9P%<<6ggNy&ZyN`NKSNbVCBC4vsRmuM5BQ2Os+r)h}TDI3n>y_QACL{q81g zUb|qkbf<2CNY&eW38y>vz-CTNsQ`A;^TmiH`?C z?D96`^t4+e+rv=3`jb}82HCiJC0_Ij@_`qXbW?ALa9KSi!tvhLeyD!7X7>)3Wf(LL4<0iS4X>NHVk4)Gx%u9@p232c zk`-BbzEmn2Xa8}h1dwrzrL+AD?7 zvzUOXbq~tWb66o0<=3VqdBD=A+XOFr&V*38`+(v`C0v6Cho*w?^ow@+h)@QUj$CFX zB0H<8B9KN z+~_T5cI!ss8qn9awe6|swnp~ua}=)%yUM<>pj2H?pRFS0#pXJRhy3O?+WP$hua#G+ zIq5;`iS!hehId7g7~}-4WWwg!RC~u8RQL!Z_)covLhId4pLcvX&lYZ`?-J8*gm2mL zUM)6nAF;qQ;d}mL6y^WXl>}yHMtV9WCj-FmY%RZ2wlE0&b)8|Q2Ca;yR(Gakx*@8~4;}6g^$Be+w;vo{ zL0Q?rVD*RN&)mZ0E;73Jj7!pUpZ)eeyM1XFm9N_n9iBNO(=jb(79gZ(RwwKcO 
z-0nk(M?sCpppAlh?;aSSVx1rOW#SeV;nN=zYFC z>s3FegRPCtjeWc2%@XI7yP&)s_G$Y{!yMvj?-PVOn1vA7?Rk4P4l2IruRU2ax3eq< zZ*5Uu)5=+ctYw>U*I27g^PJaD?;1U3y=9+yPODrFR`eZ0>Yk#XfV>)%u#2o^7dY=u zJ}uW2D?Q7;Wqg6J)kPeTZ!vEH7VYsSRK8Wg9(YTS?ZT&8vOrkNRy-M6@=o8pS)vD9 zqrSj-dYb>NtGPGlAI#mQdoh*r1;K}_D@w* z+TXf}dJj0v3T)-&Wfk(NPYjMg>_UTz$mFg_M`DUHmb3f3!B$<5fY`n7#=e{6@m3%h zf#_J5ZL+k;-;=hOA-|!$fyncrWA`4d4T1Zx4>Y;&RaiFccYgmKWiT3F46a`jBAZFg zu4sRxH%g@OfI6u83mLn�u#tQfF>hFpHY1E4%v&Iwb*4(ol-2BaSv=|NxIDYv9akq)^do;#E=`!x-q#MF1P?lYwUEZy{vZb520q=U^1Fc zz@8*1R*J4(pX+D3KgTwdR!UM!vZG+surrXI*?B&y4I$uszdd-`6B|Qy%2r)_F{%F) zZ#(zm_6T4slOsNov>1)!&k-rB45%|u*$cRUXyLUG*fcVKyt>UQOy-U!f-H=gG#;$X z>Ka_M#itFlr{}>o@JR$^9FTj^rK;g-#Eo~o<_}I+Z8#gnBEA6E0}2VBF4%l@=t)0i zVx5>gWQ*g2^1N<#_f}6egk;pq>s@)sjPYw!f z7AsR@W90nBGDyw8v{C^vbf4_vn@M?irr}lcE*skOX9l2jlNS=$D&6(=OR3{}41gUX zbXX{xHN@EA5Dhw~z!)7*npR4Kn^0msrH^Chi`#f%)Dqd|_bwGG2w}O?ZN8>vPL1&& zug!L^-fK(#Pqh#wxkRK6U6kacT;?STU4;lJ$9f95yYlRQQHkOa7j3BspaUUH(IR`p zy>*5jU!qr;bm1*U@{W^JDraCZy)#H)yXd5@p|Oo{=GkF5e{KLyRm&t?_uwD5>bry1 zr-U3b+s)SOALbVbnMw}uxGAD9q;FsklkN{5XV!Md;P=96@oKF!)s5`uOt+kJqLWbMFOUFzPC!|QTMI91elPPh$P_TsLvx2!Pj7bn5(jgQSNa`gzg-si<#Hcp=NSkHz0lPSS<8)PJ3Ei-j zQB1AatAW`;K9N3ceviR*feM02pfH7YMOqYN9t!;;-=Kvgx|E*hi|xGktw^+x5(VKJ zJTu-L#0>kZrw8Ge=3eG#{`#$vEVxWS>|mXo%h9ggi8m&spqbK4r%;mghOwL}k{{Gj z!XOAZGqhTISdm4ij9r`qHgPo&(HT;t;@RRj|24;F3Q&e0N}KP|w}eOXsZ2earC7nJ zWdgv6Wmr6KYT0c;iG@fNrHX`^F|BwPeJ50{GGSs1C-Ib?FH2XI&J!qv)d)+{dXroTi% z=t?HxO;K`F%b@Odrm(awbvdmLitQFsWeuf}=Evj8yEaSZ-OK@p5cK)vgd-LcXWep1 z9LtYb-qbCSnr31;9T7vwbRMF1{Mi97K?r;MHP&)bcChqaJQlxtHEiSAjD0gts|o>S z3K*B~RsIYfdNQD_cpUC^`etePlq%j_h!j@u>T4wC(i0|A`E5*BGQTmpbP++9>=uo@ zbr@N%f+&Tg+IR{5cIiaPjKPw%#PEM)}k-KEgzgVfM;&Qh&^{v4!a!f2IEYb~M{$Jk0< zfsrNSZcrHv(Y4amBAv4CxeNkdevvtv0c*F;a^aqJ|Bk7jc?+8^X10A(Xy>TdEjSql z(u3`>1eG_43P0B=nRhpu+0ig;**=&z)0`X^LFBwMG;$rjklFO9hxK^Fy#QS9a=Xbr z12GOWDMQOJZd`kmN7aH;qxQMim{_|gxl}bTbkkA1U-*_+RH%NEqP|q_v@q{`mfKCe zXin1S0bau7V$UIkt^HaTj)&NJVV*nJYf$S_(E1~?*$CCKw2|w?vih27HVspjv{};#Fj90%KFlU7ubMHqEH!akeeSB2s? zyR)6xxar8rZ{#q}UM+MBfP-ez<9cO<%$my8oB05-K!@Rr5OB?~*e}Xu4M^mIUyaW1 zPR{sv%nRV2tJ54}Fcm9W@L&MjK}VwAbK_T(lM?>peEc4h$!PnT#2q0~czVD^YWG!d zP}(x@akac-AYF6jm8hF6Zc(b*O5qy!+trNNtqRD7$3*q-a2XML!6KeTWeF<_s=Q0e=uh z6a{va8W;bn*%xestqRsf(rDdy+iff-0J=U9Q` zMXhcbMi#o-d@U?DkCI@1?Bfcea5B;TkcLGR*DRvhZU&qtV2E}NGCR^5TLE^R<{n1! 
z^A05v@s_RjYxGlDw74>Q_-D5HK*>RObd*9Tzr4WQ&j})@lVzhK z9#;eb7?Cw8Gt%it=XX-=dsp5h0z^%HT!%o$ks@@FNZS?ZW{~3$`K)-|BDyh~O`MMam|6aZF;CWmFLA1ML_2Xewxg9W* zYe$?}eePZsO@4a_%xx4nWhAV(H>!#lW@Ca*=4Sk+so*QnbbKAk^ zetzR(acPuYQ7uabzIuXXC-x1%Z={vKl3K{Un$9*#V|XLJuNJydDoG0xAK2A`WKi&O zQd(&F*XiTY;rKvH8{0<#?0iH4jfbZ_@D+i&@>0|_tkodMG+?t$#0E-q@>PV+8c} zg^#A5_(QEVI%$+KM(Q3^J5MnipI_>===Lz=$A8t-={Tt*VfMQlZ+UiR!&6LXQ5^JC zAT&{5-1XbeC6frlw;q#W9@wv-p=1=~+?gyv=m$b0r&zEfkD;cgWMQ+xqir@SOM|tx zXIO+_2@gYWl&}Uf4()(ZGa%z*qqgf&reyH;(5Rs14YvLL8K;Aj zYw8&^QYAzEwNNWu=av92MEaAo%K3^4B-KLkkzKiC2ncH+b~NN#j{){&FqIgGUU87r zD(<2?WK%6>vzoKi-Nyl%$xT4Z1?^~e2@)Ce4D*@;nV}mFsBZZr98V7ZinLv@L%a|T zZVHZ{=>$@G+3IFC+nsSx2s^}3oi0o=H13bq>5*n+V|VCHt7?_&P(pkYeYzjsaNo@K z-+rE*k`zP_#i5?XO6ES1%~0}#K2wHp z+mTi%zG)F(ResWk``U5Yv&eK+(K^;t7Db=gy%Q0Is$;0(n zZGhG3p#51KwPUwxTm{?3piBFK5}({=i-6~OL1sRlIzntbG6eqv@`|~7#N$H+wc&bw{;N_ zm>cmBsjO56@cCY*}GqJQh~<3AoEGXTJjla9{C z#f8>|iPqM^l#YRen{5dkazjS2dNc+bqX$@_y={_3h7-*U3{=xa=W z|2Ox4tN-sr|M-ahfziK+?y7J1Av*oPivB^(DfE$48DQ(6Y-?-9E9#(cZ7id2`5`vL zht#wT+;sm=@=v(9e>ch48kw8834f$==4GU3V5FgEreR=Grf1~*_+@0JqG#Zwr~i}5 zUnu{L#~%_H|9?CfI9b>@8JRWzBEz3V|MvLb75x8W@SlYE+hq8I_J13DBg20{Z|CG- z^@nmA8PXYB8Cx6M031KyXZTn6Kl0I<*g9D219I9V_$Svxn14-^P@O$d$&(*hJsS3P8jwVFNI>>a8UB zKti8D{-fELK-k#X*vj@}irWAPq|5;XGz8+tR#vtIYPJqmMg&Uw)^=9L1Ooq*^iQR= zGXK|(|E&F2dHxvBzl+ZOF&x7`mG9p;{MLCTWBY%t{pW!Gl>PUc#m?5@56u26N&c15 zACmlAum78qPe$e+I`AK-pSAVBs=&t_l9AyQwl#G64ZDOeualFx5vK{qM+RU6b_1G^ zL=fyW%tp+tGzLtptTe193~VO)tPE@%%*KCAi+_FgFRJneJ{mF`Ce@G3zrMFqp6#(KGym?_Zw%FDJ^M=zYAUKfWU9{_!30 z|9y6U>gxaU-+vDHf0^-L+5KN9e?{ZJE&m@}|1Cs+CHOzM{))zbTmC<|{#%ItO7MSh z{S}S>w)}r^{kIVPmEgaE3+A6zrm@Y3Y3cG|L;eRdmXYxDN z;^w9kHMas7JJ5+*eHc5!#)iN7{mt-ZWBE%&M)9zGWtkXM2Dpxistfz@S!P)r^35Oq zv9bIhU+npDvQtH<8rpK(x_|92T?4ylnh(;|;C7jsB2@7)dimh!;)L{FCWjuna?MU+ zi@dBM?GZ$kLRcd|ClOBH6ZR)l;mY;lVYqO<;qk`cJG1QAFP|!HTskj*C)jrKUdy^T z+7>?FAl{a{p1Q16^0jS4J7U+#0-cM?Cw;hKkoffs1KAGPSxeeZ+nlq1V4z={B1jh! z&@TuI9xe!(HJga+kE)!xIK*1Zn_pH6O-VpK)qh4{<@XrN&h;}MyI|`%CD0I*%r6Lm z3t22Kp?TQb-0eHuH*WRG?OVYv#H6zgzSpvL)ccMS?!Mt2ant%%^?={E*#~%g0^D1+ zy0PDRc5DhK{Ph25GB&0UxNE-iDqst7}M-5T4W`o%_^0dAM~v zzsYlS=*l?!3zBjv^T?U}xV6?gN&UiVb#%O%L%5+}y0*ps$EfeoNw*A~Z*G)$9e|~j zh1VcPJTX$(N5c^qM?^j+;G8q{(0<;H?daF}J<| z1t)(+06gRFq6KOroT_nlh?S=u1CgYzn~7=TOA5v`e?_879C(YfgzE}8GGLgfLw{b| z7v!~~qr}Zff{gOmGrt5<(mPXF4V>i}uTo4q0ziy$ti~KKA_K-Y@X60}=|@1>XK^x z$o7I&_&-f+CZ_+%#9?J&`{%Zlc$k$kL!uEO)aWysuAZkqhoq^w?nx4e?bX(_*i6XC(rYj)Fnl1+ZW8?+2Yh9Nl@E|o{FJ_m>) zm-)p5|B)}TdqvNut5erU*Czsvh_bJJyd~VDOh)waMPdliKd!|?e|Q|$dI$3bV6?CE zrQB#kbiUDSKjxVgId4wO>P7}FB|=}*)C9*1rJQhdn#r+BrcNS34vA;w1hXC$jH#5) z_za#LMjwqT%Skd*t*N#&@Wjvy`Lpf89J=#pJ!ZJp#sU`GRWkEx#IPv53X%07gA%QvdXQlj959lT@xBaOb+%QeKR9!)B zswr?C9}+o(8cIW_wmo=4JQ}TceLsgwWrB$iN%3s1_>rCP!dW-MhI#;beI(-zipzO+ zBei2)@Sy^9Vy_7SXP}=Llz@YX;Hwy@tGIR*7=gxInXkeMw+EE*kPZtVF6;iqZ~Vdo z9)YE(>on0@(`AUYH{9f#^yxrW`b$}xRs@<%Q_-p7< zBys;yum4A$_N;9GqSwmG0`Vk-Q0)s;FZ@{kAWxBALSj}YIs4V`T~=g?Gj&Bo=zh}K zHs($Mx+WECMH~KW35t2tR*7DdU8hTe*30aaU9WP5GlhleKqXVb%OS7U3%zJuFVXd( z_eqr)E(GDyCW`Yc$jiX<42nAXNBQ0Ya_lnn3YnojM%GC9kOM!vER{U`7qZ07UuSi! 
zNMq_{s|pK0M#~_7iDo30kY^(0=&COH5XCQc(&t8t%PMFo@uy|)sN@(b|H@6p_jSp0 z&i6iZpWB7=Z)ze^wNFSWFN*>0zW~a&Nx7_~Rh@(l1?OW< zyDx}WYrA`#yt3_;?SDDyK#$+yKIweGY1hl#LED_O>BZKV_c_jK(He-a3K#T#RRC3Z zK%QZJ9@eSNdq+rn$r^@&P@@N9vKz112hCGn@(0NHDQ%SS$9*~(8tb!O7BM{3B)Pju zz40!EfR*#(h1(|a96iNB;W2`ZMvI~Hj?OEPEy9m$SiYnk$|)%pl0Q>M0l=D03I#nS z6=W^0)}NHmDC2XQU{dC|sxV|XX}p0x^Wcf~{@C@x{|LSWga5@?U{G0{eDHJG%${l} zGJ821sLW{4RD&XmFenTZh+0K352|t>G=GOA5H!8{E2u7}*`Poy>eGb+IZYF8jidf% zjl-rZTj`EJR>+Y2ecf0U>^EO%Z59X5hcpm|^36uJ{e=ePJ4!GZD=@d-kwi2_S>e53 zRAVH}UN$#xememNbXMtzvPPhEKqA}n9zS%u?Dlynoz#nysT+J}-KD?y|JsJo3mppp z>piydOe@vz4tj3zbw}xk1{0G@C@Vy!49@v#CL>=Pn~rcjG@s7uj!dXR-VpG#gN?X+ zL}C=tDBVf<{_zga#3M%kkGlRpa{gpx|Chy&wz5ap%$sJG07qrQnptF>mhNS`PVqlr0(bRvRm!8j>PnHPKhyg|3AY#&S2rg}YaQu(x>>4fSG=CQrOGdqQr^ip= zF0z4z*9SGK&maMhz6-ZO?+=Ula#V73dZkVS={M3@NJY)zOOrW^7Rp(D{Uhq4_43{{=`zZUE z)sKSHB5ir>KtK1dqV`!+t=)Mdpobfvy+~1ifeTWxA-9rKi!5;;FsfGtMcWc#N$!}@ zPN_Z#B5tZE(*uXwPYOrEmp%(zu=FRy%snF*PYRdwlSwQWlvSNL#!c$Q)Iu8M{LJT> zo&R>3-~<~ThoSv9 zh-Fm)Sy4fF!kTY1ldI1q15u?mC&Pw1NP(E2RqXf1Jf;sl429OxPxxF_gdjRWD4_D3 zB>Y3+b=HrR47j}RoF*0O;XRN<^6&gYrx&hb=mdvaf_37#1}jkn65F8E<%hDtjiW-& zk8{I+B|n6)C36~i2`PVGt}I6riV*l(L@){Ei><-}DK-(ah)y0#L3H|a*Y|A=z4Ctg zGpq-7ut@K<3Za-!i&pQ;O(YYFe9a^VONOKljva|obfB30IX;DEr4971Mq(vR3!>@j zKHN2D30u3R`Zk${2nppH$L@oDXFfUk)FkUHcG@izG*`*I94VPZmvO-tIn{cTi$rz5 zFEa*d(bhR8+M5w|@9(_rZ@PkJE8RdZ)H!#*b2*na5M%#GmH)4k`TlLa&&t8@*JX?J z$LoES9;*Er{i(YY3`Lxq*fzOT8KExZr7eFTm#KUmnZm+5da1$)~O zQG88kqXv=IsE*DAnW{W6XI|_U5U835k^?TwE$Z6ZxWN?v;S6TWige+WQ|-?h1->VN z0UH6yyfUqCowCl%-2j%;8760b55WZd`pNe7lKyD3`g=&*{uRgZ!?R~4l2(=PM7~v0 za&0Zp{e(F?@OqglrQ}n*6dJd%`3dE-%?e+JTDYg6KhBE9$CfxJ!p2syxe`{4-g}tD z82RD!{OE!)cS4p&ER#*KY8LYEsnNo4G}l6TH9PS`zj%!!1`d^m(z%P9yqPA#^rW~Q zT!D_nC!YSl_Rc&Ws`mf?$WpRojS^E?3uk65GuCV)O7<-xvtby9nX#{tELoFm72>u; z2o)vDl0w3S>x#oSY>zeDF z^Lk##|KQ+WX;qK6S)3&38;>RSgYwhY>|}T}vcz_38zLHU?*xNV-&i4HIT96Z)aE7H zXRja!yWCxZkJqxq3W~jW5_q6Xz0TD+zK0`LI`+lm{kJA|Jd;b;v%ccLp>aC>{XUUqBNTi^MN6JjWRmQ_ZLw}?(F60;7@-}{X zrtuYamuX?Ek_A!AQ5adXzQH-{wN@8w7m_L8p`RhOctPHsKIB}#4>Pn(xHHHjT-&nP zc9&bUgIelk6NJr#QmVyOze)k2hSI5ESeoSMT2**-ibO zSI+T}L^_>y#!O6jiQ!ctcK8kNeK2JB(Qs@(#42Jr)aiIn_q5=69QjVK6@a7TR@XL1S1fSrf-bw4_~OS zj`<+`%37ls(OsZ-W#H?+%F}~G0+IW42Tuh=U7zgzY!ievU0jbGJ3IWfpRV*gEWG@Q z`%49J&UCfcuRhe2JoF!Ecnw<;TfH7^7gX>X`T*}BEC-A6oK-AIQZJZ?B)+q6L8guj z+~FTP%1~UDIq$tI_r8)KtbV*t?KjILj}+*gNE{`B1trbdF7svewNnCmcKR?6SAXV0 z&FpHxGcLhGu_NNY8QNIgQ%R_^wixU6*r^rZy#MpE*V=u{tVWw`v4Of{m;2W12+Km} zY!Ot)Wt7{^w_}CS--TB=tIi)fz&CJ6vNlaMH2>YkdQ%$l(v3%-g>LuEJ+ju(oS|Fa z{DZNjFYj`q8gL^IM$H&U*o%C5q(54HGtsYZqwQJ%VW`zT_dwDyzMOR@c{J6lb{(BQlKP;uTRN}U(2cCn!kX4_&Mi*4vl41^}V=< z#ik8ybHxoE_e%Zjn`QRl>{GACimRP(GQ69OU+Nytzq$XxybjZaK2+`z_2H|@fg_wM zANb8T&_x&6BzH(M=6Xq?n9shQp;Tn4vy6tv=LQ8viJNXf+$YI~H}6z`4SgxGa1~=I z7A7;r+Ps&1SsIB`^{?Kt(R{6dcGaq?-*TL7FkJyT}{j$pct=W_j(4WTg z#LFk+Uqjh+menI<9kh6*PAKbkbn1cAa~-PLIFwKk5WJIl!m~f`=@EV}j%dZU%Zhid zYC9glM;#A1x2LRFHGaSH&@I(t*X&NNr7CUAE;f>vLe8pg69QQmmPNvkTqQW zn+b6@D&(ID!h;wv{$Yku^ZNw_ZWQm3@mG4l79-WH%g|L#e@@mB@XerpJ=kU$rY!XC z`nmdek(R6*zs(mkJEbzsGF^M|Y{acTM8)v8)p$K>bD7sJfST!KWHM27ayGM4#`6Ti zh11!#vm`~aIr*x>?GF(lU8SE^QG0#WeUx`V0y~SH#w)8%s6Yg=9~@y*)OWExaZ(D) zeM{dGcU=KrR2U^q*>~F}Nn}Xv3m39dknx^LEOsvnCptQEp*(wR!C5b-qufb*AB+A{ zD`Ykn1$S`>+2~GeeUz&3rpLJ4E)VD|yHsXGco7uokT-YW<>XDbSCH}zpNYQO5XO)Q zgSe^D2#3U}BfeM01omF9TAlOE^2zC)EP5JJP+nb}=xJ;OTU416%CdfCwd=ah>dD6$ znO^5wxJ)T>K-3fEWozrITE&5>sE1juGiKy_*IOFWGkOPi@(u=M^~&bOKJukMA2F$7 zu;U$L(-O=+YTckr1$HpfGxd5bc}yMaH4-ndyf7lUInsYAFU9!Fmay$6?7zRw`7c+s zIP*65SI|AR3rCln_jX=5X&NZhB|5R2$;KSTf#SSy_?|4T|Mr`uvafLmOy}kH5?ao5 
z%yh&HDW&P$=x5;`vT>Ogsy#-BCJ{pXEJ!7Jee^;=Wp7MxdT1cfoKGElE^+9EUyTWKa6xnlW|6+~AM6>dWbn+vm%EH(%e8(jmPnwe14u4^n%P+V9eJhx(wO+V+K5e`Yy-BF6-r`byu$oJIIa zn_pZ)#?8dXcP$di3R}z%3Pz#q=eg=a15G)CulBEM;QGE$NGBbIdc2b0Jx5v~XV_eI zqP3jj1Eq>nA{!-QVD+c!W(H&)rd!6Zy^mI^{-y>F*on! zkNo|ikAKc{=u1KWWLaqFqAcV9qlthcv%oXDMz&MLs6q`LE-rdhVs;DFOCWw}2f>y9j)sF8>)z;uNHk}mO%#UI&a z6>+@t_8J|tZzz2}9rW~MQJv<@V+~ONvDg3p(_P;#zGb7=`&Ngam^3N~-!OVvVqQl_ z_w;Pj7|~GqWYd>JhR=BOVm`_p%6}pnBd8ml4xBJ(p8mDa?` zLFJkw(|P1o1IV%6{2mpI!`d}@d5#M;o^uTMWltu!ySP2i4j{vmk>I3U$C6W?zOS5= zm1-?AuvetJ=(H{^=;=0jRFEbSoY2I z1D!*Aj7#Mdr)_&Mo$_4!(7Lzp)3+sU4p4u1}4ec@~XlO=hHezOk0lm(o&KQ%h+b?quIkZQ20e4!B^w?`ibt zCnZ%AokHhl)wM{I2Y$rT(9DKhir+?2^aga=$mXhRlH}(sI)!ID_1gF9dWBdH`rlqhG#ydV}9(Jae z&|pEb%Z9eSQ?A}M6UySi7#lwBz+6keu!j+S5Uz9X{Xq-8-SE(M2s4heMpNAw!pniu z_T0nCtyR;>U%2x~#E~4ELawj;%1m~lrCK{W)tq@V!h$2-HtsYCD=LSVu)W+- zA$&-|L+C2=_@ghM467MR!xJ8V{KT}P9LOSkE~~L}r$*FHb`xzjF6QB|m4jS*(hbKI zArX2?rjb<;J3WBBTTnAU$w!hcChSHoo5@{CBlDdx5ht|WH289lm>f6xSSZ{r(=F-3 z{CU?Y-J?0|or=!D;t{HlGLK!T>~Tc)tIpI;pU#m9gaF%x3ojm9qg7!;X^id42Cy-@ zd_Ib>T&Q4UJLa~vUkQB=$5bR9!_>zx9Xt)2R+@%*3$5}WZz>vY_n4{PDa4r?d9+1g zsAHJflc(CRme+naDSWxHa;|@cJ5a4|cgT)o@mUgjJk*`u=N+^d&~iCpom$m8zIwhW zQSLvYdz$DDrB+8=0TMoYLt^pQMS{3t5=toEA4wBsM8pV+lWAab`a6J$oxbs`m2T%NKdg zT*A#$x}}QHMW+cdRYg@<)C%ftYVxARq`qKD?A_Q*He2ztW999rbQr7Fi|lG+>&r5i z{hx_FL-J|=k>hW|Q;6y&6|Qv)o)+xKSL9TjuCS=h;+A zz|*GTbOlzw?b2g03OR-xPZp}tsx~x-F!xpZ>_>o7E>5fdZZ3melW|hurygRHUwYq8))KZ6Olh$cq!X7pKOUg zk#fVO?3sm(nOecuSlN?4HzZyholsj8R1i}zQpmBnWm7*;dq3Vk%Ky55@z=xbVa(cx zEprWWZLzv$7F||yR+Xn0>iF$1=J1u>F)FrlHMBjhVi1t&9l4Yje>FbUFy64TL))&R zG~7MI{o0J1TCA`(U@{x}_EeDdpw=<_`V$)$=!p)5M+!&>XgTl0Zht8f;mu35_NAO; z^aXU~P$Adzr!A7d7dB2r+|KtrKj%JTKVl@#FRm~CS)9`VZ(x-+mhPRt?vhV>0xyO? za5Gf&K@2*-C9IRvAHzE;!rNwSjHLr*?w+RF23A#7ytln;ZFrhsgR-@-WtS^t3(w0(VGyWyL7f|V-*V{|X!?lIi4 z;q0+T)Iy5yaN*EXZ=%OlkC&7a9?D~j4gF2&v#-Zg$1x)nZ%1p74u2T3nHZQAAK@Ri zoLuU&d!yFzus`TYV*~bDi=R)eyb4B@ZC5CfC*o4(ZhkHs|2c*F#oW0eKt;yEt5$E-hE#P-0Pu(g<*SVwEwlAw}t z;&E@mfEa9x)TlwzK=YL+hEMvNKSpQ9_(TB-S!4G-j@AG?syr^-W87=p38J#15n^}5 zM=s?i8_nT-qie78rd%$$e9k<)JFGiNKC|i}K`xLKtgU>xPW+*n2H%>&=UrYAwh_64 zOM(`HBspz4BS(Fwxr(n9pDTTEM(?Y~10QnSiN8~-VkP54nVIk}_OV~DZw}JnIBFCn5_7t_w+_B`bGSI-+HIB9lhP+SW*Vl4Kkgg&Sib69L?swb z_6ZNFeZ((TG*t8k( z&>)9T*&~my>>JIvk+B?E9XZqE-+i~2(3>LiM5qJ#txCPBZrSxoZyY--JBnQ-;ZuC9 z5WDI?m-8QeeRCK{r-K zM{sREHiH)TOa@qA2)cklgP{v z@1ZjtHT?@k;{`yIoxoK5hhPuSKbl{u(W|Yc)&h1l)4^XbWoF>Kl-^c`XYbVbf7mx- z)8f@O^)ZM)$nBW*x6U3?|6p+A2i@^gQ<-)<+E_X`l-ca7NByVf-O66|&X&~5L3!a+ zA0*lLz3l4D`-gb}z^v2jtdR1_gJ01@hl`GnCZq`Wt`#n0r=mumrknJ>sFTn%5{|w7 z7ZkiMa96ioBYyMyu<&IvEbF(B(%_uc;Rk1hLMAg$ zE6A;1+8_`04qMHa#n;*|-4EyqS(?jyk655YkVXfEgIauhSJkK2ngYjIrXyn#{6!w9 z`-S*dC)SpK$^;)HE}vgm{pBEZ+a%22=?y42y8J$g#{UJqApz1G5}Wjf#J{6AB>so= zhJq{{WH+ENh@zYH7B3?&rvQ_|IASm| z3V25>QVEWLBGC%pO9%S@r5FFxUtSJ|R#Je2`gII+Q*4ew$e?kKiZVD*&aMcwgJ@9BUg>>eWALNJ08Nwz#aTf1}!Gi(9odrc>Bw32L zlmE{k0&VL%3h^@pLCgD@_@Sl!|B=#vZ@s@-Zq@EzW&FQN`=*wWGesbAO8lmtMa(;IxwZ|(?wIYFcV9aB))j&cXZ?BHMr zTm{Ef;2`IF0tDo=K|@n@bwHQoP6hL~{K28$W-4HV*1>=#o0l@6$)*tjPsCAd?Y0T% zw|4No00c)zJQ)x2h!k76Jb)pCyeWW1lR@ScOCW;;q$3gU3*bmpG!_fOkbtw7yAzu1 z4s=0GndnZU0~%}h@(G z3ef(3cofYUMRP`Nb4LB}(x=(z(`@v&+35eUK~bFnL)zhtXy!&VbE9qMMt>Uo@NxrT zZGs2U4=|#gTnn131^XyzoE zIcb|Y=}&_nUO0j`0W2Vi76dPvqZiH5Yn!9j4-Y?DCfG*7AozVxwxfYQ-#fT%9Tjq$ z{)tBS7^BH<6tK_JCK0{d+$j)wz!l`%L*lXnAfT3gH)uPi045dV9HrYz1!uXuOZS;=` z3<$I(pX0tpNor0LKs}Pyg*;vqd2)l}@d}p?R3j>6BMUpnqOen3*MwO})?q{2P9%;y zJj>y#%D12|vl}CAU_b*Wz zVJ7=^r0KXU!QL&A +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include 
+#include + +#include "main.h" + +static int +lcore_hello(__attribute__((unused)) void *arg) +{ + unsigned lcore_id; + lcore_id = rte_lcore_id(); + printf("hello from core %u\n", lcore_id); + return 0; +} + +int +MAIN(int argc, char **argv) +{ + int ret; + unsigned lcore_id; + + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_panic("Cannot init EAL\n"); + + /* call lcore_hello() on every slave lcore */ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_remote_launch(lcore_hello, NULL, lcore_id); + } + + /* call it on master lcore too */ + lcore_hello(NULL); + + rte_eal_mp_wait_lcore(); + return 0; +} diff --git a/examples/helloworld/main.h b/examples/helloworld/main.h new file mode 100644 index 0000000000..6027cb5455 --- /dev/null +++ b/examples/helloworld/main.h @@ -0,0 +1,47 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/ipv4_frag/490761_IPv4_Frag_Sample_App_Guide_Rev1.0.pdf b/examples/ipv4_frag/490761_IPv4_Frag_Sample_App_Guide_Rev1.0.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..ae0a011352d75fe54dc523bac5d190bee9ace257
GIT binary patch
literal 67178

literal 0
HcmV?d00001

diff --git a/examples/ipv4_frag/Makefile b/examples/ipv4_frag/Makefile
new file mode 100644
index 0000000000..78b5f8d103
--- /dev/null
+++ b/examples/ipv4_frag/Makefile
@@ -0,0 +1,63 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Intel Corporation nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# +# version: DPDK.L.1.2.3-3 +# + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(CONFIG_RTE_MBUF_SCATTER_GATHER),y) +$(error This application requires RTE_MBUF_SCATTER_GATHER to be enabled) +endif + +# binary name +APP = ipv4_frag + +# all source are stored in SRCS-y +SRCS-y := main.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# workaround for a gcc bug with noreturn attribute +# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603 +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_main.o += -Wno-return-type +endif + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/ipv4_frag/main.c b/examples/ipv4_frag/main.c new file mode 100644 index 0000000000..0f0c5f63ad --- /dev/null +++ b/examples/ipv4_frag/main.c @@ -0,0 +1,707 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_ipv4_frag.h" +#include "main.h" + +#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1 + +#define MAX_PORTS 32 + +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) + +/* allow max jumbo frame 9.5 KB */ +#define JUMBO_FRAME_MAX_SIZE 0x2600 + +#define ROUNDUP_DIV(a, b) (((a) + (b) - 1) / (b)) + +/* + * Max number of fragments per packet expected. 
+ */ +#define MAX_PACKET_FRAG ROUNDUP_DIV(JUMBO_FRAME_MAX_SIZE, IPV4_DEFAULT_PAYLOAD) + +#define NB_MBUF 8192 + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ +#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ +#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */ + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */ +#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ +#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */ + +#define MAX_PKT_BURST 32 +#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */ + +#define SOCKET0 0 + +/* Configure how many packets ahead to prefetch, when reading packets */ +#define PREFETCH_OFFSET 3 + +/* + * Configurable number of RX/TX ring descriptors + */ +#define RTE_TEST_RX_DESC_DEFAULT 128 +#define RTE_TEST_TX_DESC_DEFAULT 512 +static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; +static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; + +/* ethernet addresses of ports */ +static struct ether_addr ports_eth_addr[MAX_PORTS]; +static struct ether_addr remote_eth_addr = + {{0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}}; + +/* mask of enabled ports */ +static int enabled_port_mask = 0; + +static int rx_queue_per_lcore = 1; + +#define MBUF_TABLE_SIZE (2 * MAX(MAX_PKT_BURST, MAX_PACKET_FRAG)) + +struct mbuf_table { + uint16_t len; + struct rte_mbuf *m_table[MBUF_TABLE_SIZE]; +}; + +#define MAX_RX_QUEUE_PER_LCORE 16 +#define MAX_TX_QUEUE_PER_PORT 16 +struct lcore_queue_conf { + uint16_t n_rx_queue; + uint8_t rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + uint16_t tx_queue_id[MAX_PORTS]; + struct mbuf_table tx_mbufs[MAX_PORTS]; + +} __rte_cache_aligned; +struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; + +static const struct rte_eth_conf port_conf = { + .rxmode = { + .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE, + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 0, /**< IP checksum offload disabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 1, /**< Jumbo Frame Support enabled */ + .hw_strip_crc = 0, /**< CRC stripped by hardware */ + }, + .txmode = { + }, +}; + +static const struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, + }, +}; + +static const struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +struct rte_mempool *pool_direct = NULL, *pool_indirect = NULL; + +struct l3fwd_route { + uint32_t ip; + uint8_t depth; + uint8_t if_out; +}; + +struct l3fwd_route l3fwd_route_array[] = { + {IPv4(100,10,0,0), 16, 2}, + {IPv4(100,20,0,0), 16, 2}, + {IPv4(100,30,0,0), 16, 0}, + {IPv4(100,40,0,0), 16, 0}, +}; + +#define L3FWD_NUM_ROUTES \ + (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0])) + +#define L3FWD_LPM_MAX_RULES 1024 + +struct rte_lpm 
*l3fwd_lpm = NULL; + +/* Send burst of packets on an output interface */ +static inline int +send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint8_t port) +{ + struct rte_mbuf **m_table; + int ret; + uint16_t queueid; + + queueid = qconf->tx_queue_id[port]; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + ret = rte_eth_tx_burst(port, queueid, m_table, n); + if (unlikely(ret < n)) { + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < n); + } + + return 0; +} + +static inline void +l3fwd_simple_forward(struct rte_mbuf *m, uint8_t port_in) +{ + struct lcore_queue_conf *qconf; + struct ipv4_hdr *ip_hdr; + uint32_t i, len, lcore_id, ip_dst; + uint8_t next_hop, port_out; + int32_t len2; + + lcore_id = rte_lcore_id(); + qconf = &lcore_queue_conf[lcore_id]; + + /* Remove the Ethernet header and trailer from the input packet */ + rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr)); + + /* Read the lookup key (i.e. ip_dst) from the input packet */ + ip_hdr = rte_pktmbuf_mtod(m, struct ipv4_hdr *); + ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr); + + /* Find destination port */ + if (rte_lpm_lookup(l3fwd_lpm, ip_dst, &next_hop) == 0 && + (enabled_port_mask & 1 << next_hop) != 0) + port_out = next_hop; + else + port_out = port_in; + + /* Build transmission burst */ + len = qconf->tx_mbufs[port_out].len; + + /* if we don't need to do any fragmentation */ + if (likely (IPV4_MTU_DEFAULT >= m->pkt.pkt_len)) { + qconf->tx_mbufs[port_out].m_table[len] = m; + len2 = 1; + } else { + len2 = rte_ipv4_fragmentation(m, + &qconf->tx_mbufs[port_out].m_table[len], + (uint16_t)(MBUF_TABLE_SIZE - len), + IPV4_MTU_DEFAULT, + pool_direct, pool_indirect); + + /* Free input packet */ + rte_pktmbuf_free(m); + + /* If we fail to fragment the packet */ + if (unlikely (len2 < 0)) + return; + } + + for (i = len; i < len + len2; i ++) { + m = qconf->tx_mbufs[port_out].m_table[i]; + struct ether_hdr *eth_hdr = (struct ether_hdr *) + rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct ether_hdr)); + if (eth_hdr == NULL) { + rte_panic("No headroom in mbuf.\n"); + } + + m->pkt.l2_len = sizeof(struct ether_hdr); + + ether_addr_copy(&remote_eth_addr, ð_hdr->d_addr); + ether_addr_copy(&ports_eth_addr[port_out], ð_hdr->s_addr); + eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4); + } + + len += len2; + + if (likely(len < MAX_PKT_BURST)) { + qconf->tx_mbufs[port_out].len = (uint16_t)len; + return; + } + + /* Transmit packets */ + send_burst(qconf, (uint16_t)len, port_out); + qconf->tx_mbufs[port_out].len = 0; +} + +/* main processing loop */ +static __attribute__((noreturn)) int +main_loop(__attribute__((unused)) void *dummy) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + uint32_t lcore_id; + uint64_t prev_tsc = 0; + uint64_t diff_tsc, cur_tsc; + int i, j, nb_rx; + uint8_t portid; + struct lcore_queue_conf *qconf; + + lcore_id = rte_lcore_id(); + qconf = &lcore_queue_conf[lcore_id]; + + if (qconf->n_rx_queue == 0) { + RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id); + while(1); + } + + RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%d\n", lcore_id, + (int) portid); + } + + while (1) { + + cur_tsc = rte_rdtsc(); + + /* + * TX burst queue drain + */ + diff_tsc = cur_tsc - prev_tsc; + if (unlikely(diff_tsc > BURST_TX_DRAIN)) { + + /* + * This could be optimized (use queueid instead of + * portid), but it is not called so often + */ + for 
(portid = 0; portid < MAX_PORTS; portid++) { + if (qconf->tx_mbufs[portid].len == 0) + continue; + send_burst(&lcore_queue_conf[lcore_id], + qconf->tx_mbufs[portid].len, + portid); + qconf->tx_mbufs[portid].len = 0; + } + + prev_tsc = cur_tsc; + } + + /* + * Read packet from RX queues + */ + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst, + MAX_PKT_BURST); + + /* Prefetch first packets */ + for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) { + rte_prefetch0(rte_pktmbuf_mtod( + pkts_burst[j], void *)); + } + + /* Prefetch and forward already prefetched packets */ + for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) { + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[ + j + PREFETCH_OFFSET], void *)); + l3fwd_simple_forward(pkts_burst[j], portid); + } + + /* Forward remaining prefetched packets */ + for (; j < nb_rx; j++) { + l3fwd_simple_forward(pkts_burst[j], portid); + } + } + } +} + +/* display usage */ +static void +print_usage(const char *prgname) +{ + printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " -q NQ: number of queue (=ports) per lcore (default is 1)\n", + prgname); +} + +static int +parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + if (pm == 0) + return -1; + + return pm; +} + +static int +parse_nqueue(const char *q_arg) +{ + char *end = NULL; + unsigned long n; + + /* parse hexadecimal string */ + n = strtoul(q_arg, &end, 10); + if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + if (n == 0) + return -1; + if (n >= MAX_RX_QUEUE_PER_LCORE) + return -1; + + return n; +} + +/* Parse the argument given in the command line of the application */ +static int +parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + char *prgname = argv[0]; + static struct option lgopts[] = { + {NULL, 0, 0, 0} + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:q:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* portmask */ + case 'p': + enabled_port_mask = parse_portmask(optarg); + if (enabled_port_mask < 0) { + printf("invalid portmask\n"); + print_usage(prgname); + return -1; + } + break; + + /* nqueue */ + case 'q': + rx_queue_per_lcore = parse_nqueue(optarg); + if (rx_queue_per_lcore < 0) { + printf("invalid queue number\n"); + print_usage(prgname); + return -1; + } + break; + + /* long options */ + case 0: + print_usage(prgname); + return -1; + + default: + print_usage(prgname); + return -1; + } + } + + if (enabled_port_mask == 0) { + printf("portmask not specified\n"); + print_usage(prgname); + return -1; + } + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + return ret; +} + +static void +print_ethaddr(const char *name, struct ether_addr *eth_addr) +{ + printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name, + eth_addr->addr_bytes[0], + eth_addr->addr_bytes[1], + eth_addr->addr_bytes[2], + eth_addr->addr_bytes[3], + eth_addr->addr_bytes[4], + eth_addr->addr_bytes[5]); +} + +int +MAIN(int argc, char **argv) +{ + struct lcore_queue_conf *qconf; + struct rte_eth_link link; + int ret; + unsigned nb_ports, i; + uint16_t queueid = 0; + unsigned lcore_id = 0, rx_lcore_id = 0;; + uint32_t n_tx_queue, nb_lcores; + uint8_t portid; + + /* init EAL 
*/ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eal_init failed"); + argc -= ret; + argv += ret; + + /* parse application arguments (after the EAL ones) */ + ret = parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid arguments"); + + /* create the mbuf pools */ + pool_direct = + rte_mempool_create("pool_direct", NB_MBUF, + MBUF_SIZE, 32, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + SOCKET0, 0); + if (pool_direct == NULL) + rte_panic("Cannot init direct mbuf pool\n"); + + pool_indirect = + rte_mempool_create("pool_indirect", NB_MBUF, + sizeof(struct rte_mbuf), 32, + 0, + NULL, NULL, + rte_pktmbuf_init, NULL, + SOCKET0, 0); + if (pool_indirect == NULL) + rte_panic("Cannot init indirect mbuf pool\n"); + + /* init driver */ +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) + rte_panic("Cannot init igb pmd\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) + rte_panic("Cannot init ixgbe pmd\n"); +#endif + + if (rte_eal_pci_probe() < 0) + rte_panic("Cannot probe PCI\n"); + + nb_ports = rte_eth_dev_count(); + if (nb_ports > MAX_PORTS) + nb_ports = MAX_PORTS; + + nb_lcores = rte_lcore_count(); + + /* initialize all ports */ + for (portid = 0; portid < nb_ports; portid++) { + /* skip ports that are not enabled */ + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("Skipping disabled port %d\n", portid); + continue; + } + + qconf = &lcore_queue_conf[rx_lcore_id]; + + /* get the lcore_id for this port */ + while (rte_lcore_is_enabled(rx_lcore_id) == 0 || + qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) { + + rx_lcore_id ++; + qconf = &lcore_queue_conf[rx_lcore_id]; + + if (rx_lcore_id >= RTE_MAX_LCORE) + rte_exit(EXIT_FAILURE, "Not enough cores\n"); + } + qconf->rx_queue_list[qconf->n_rx_queue] = portid; + qconf->n_rx_queue++; + + /* init port */ + printf("Initializing port %d on lcore %u... 
", portid, + rx_lcore_id); + fflush(stdout); + + n_tx_queue = nb_lcores; + if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) + n_tx_queue = MAX_TX_QUEUE_PER_PORT; + ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue, + &port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot configure device: " + "err=%d, port=%d\n", + ret, portid); + + rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); + print_ethaddr(" Address:", &ports_eth_addr[portid]); + printf(", "); + + /* init one RX queue */ + queueid = 0; + printf("rxq=%d ", queueid); + fflush(stdout); + ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, + SOCKET0, &rx_conf, + pool_direct); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: " + "err=%d, port=%d\n", + ret, portid); + + /* init one TX queue per couple (lcore,port) */ + queueid = 0; + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + printf("txq=%u,%d ", lcore_id, queueid); + fflush(stdout); + ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, + SOCKET0, &tx_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: " + "err=%d, port=%d\n", ret, portid); + + qconf = &lcore_queue_conf[lcore_id]; + qconf->tx_queue_id[portid] = queueid; + queueid++; + } + + /* Start device */ + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " + "err=%d, port=%d\n", + ret, portid); + + printf("done: "); + + /* get link status */ + rte_eth_link_get(portid, &link); + if (link.link_status) { + printf(" Link Up - speed %u Mbps - %s\n", + (uint32_t) link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex\n")); + } else { + printf(" Link Down\n"); + } + + /* Set port in promiscuous mode */ + rte_eth_promiscuous_enable(portid); + } + + /* create the LPM table */ + l3fwd_lpm = rte_lpm_create("L3FWD_LPM", SOCKET0, L3FWD_LPM_MAX_RULES, + RTE_LPM_MEMZONE); + if (l3fwd_lpm == NULL) + rte_panic("Unable to create the l3fwd LPM table\n"); + + /* populate the LPM table */ + for (i = 0; i < L3FWD_NUM_ROUTES; i++) { + ret = rte_lpm_add(l3fwd_lpm, + l3fwd_route_array[i].ip, + l3fwd_route_array[i].depth, + l3fwd_route_array[i].if_out); + + if (ret < 0) { + rte_panic("Unable to add entry %u to the l3fwd " + "LPM table\n", i); + } + + printf("Adding route 0x%08x / %d (%d)\n", + l3fwd_route_array[i].ip, + l3fwd_route_array[i].depth, + l3fwd_route_array[i].if_out); + } + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/examples/ipv4_frag/main.h b/examples/ipv4_frag/main.h new file mode 100644 index 0000000000..740cf4cffa --- /dev/null +++ b/examples/ipv4_frag/main.h @@ -0,0 +1,48 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/ipv4_frag/rte_ipv4_frag.h b/examples/ipv4_frag/rte_ipv4_frag.h new file mode 100644 index 0000000000..99ef0d2314 --- /dev/null +++ b/examples/ipv4_frag/rte_ipv4_frag.h @@ -0,0 +1,253 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef __INCLUDE_RTE_IPV4_FRAG_H__ +#define __INCLUDE_RTE_IPV4_FRAG_H__ +#include + +/** + * @file + * RTE IPv4 Fragmentation + * + * Implementation of IPv4 fragmentation. + * + */ + +/* + * Default byte size for the IPv4 Maximum Transfer Unit (MTU). + * This value includes the size of IPv4 header. + */ +#define IPV4_MTU_DEFAULT ETHER_MTU + +/* + * Default payload in bytes for the IPv4 packet. 
+ */ +#define IPV4_DEFAULT_PAYLOAD (IPV4_MTU_DEFAULT - sizeof(struct ipv4_hdr)) + +/* + * MAX number of fragments per packet allowed. + */ +#define IPV4_MAX_FRAGS_PER_PACKET 0x80 + + +/* Debug on/off */ +#ifdef RTE_IPV4_FRAG_DEBUG + +#define RTE_IPV4_FRAG_ASSERT(exp) \ +if (!(exp)) { \ + rte_panic("function %s, line%d\tassert \"" #exp "\" failed\n", \ + __func__, __LINE__); \ +} + +#else /*RTE_IPV4_FRAG_DEBUG*/ + +#define RTE_IPV4_FRAG_ASSERT(exp) do { } while(0) + +#endif /*RTE_IPV4_FRAG_DEBUG*/ + +/* Fragment Offset */ +#define IPV4_HDR_DF_SHIFT 14 +#define IPV4_HDR_MF_SHIFT 13 +#define IPV4_HDR_FO_SHIFT 3 + +#define IPV4_HDR_DF_MASK (1 << IPV4_HDR_DF_SHIFT) +#define IPV4_HDR_MF_MASK (1 << IPV4_HDR_MF_SHIFT) + +#define IPV4_HDR_FO_MASK ((1 << IPV4_HDR_FO_SHIFT) - 1) + +static inline void __fill_ipv4hdr_frag(struct ipv4_hdr *dst, + const struct ipv4_hdr *src, uint16_t len, uint16_t fofs, + uint16_t dofs, uint32_t mf) +{ + rte_memcpy(dst, src, sizeof(*dst)); + fofs = (uint16_t)(fofs + (dofs >> IPV4_HDR_FO_SHIFT)); + fofs = (uint16_t)(fofs | mf << IPV4_HDR_MF_SHIFT); + dst->fragment_offset = rte_cpu_to_be_16(fofs); + dst->total_length = rte_cpu_to_be_16(len); + dst->hdr_checksum = 0; +} + +static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num) +{ + uint32_t i; + for (i = 0; i != num; i++) + rte_pktmbuf_free(mb[i]); +} + +/** + * IPv4 fragmentation. + * + * This function implements the fragmentation of IPv4 packets. + * + * @param pkt_in + * The input packet. + * @param pkts_out + * Array storing the output fragments. + * @param mtu_size + * Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4 + * datagrams. This value includes the size of the IPv4 header. + * @param pool_direct + * MBUF pool used for allocating direct buffers for the output fragments. + * @param pool_indirect + * MBUF pool used for allocating indirect buffers for the output fragments. + * @return + * Upon successful completion - number of output fragments placed + * in the pkts_out array. + * Otherwise - (-1) * . + */ +static inline int32_t rte_ipv4_fragmentation(struct rte_mbuf *pkt_in, + struct rte_mbuf **pkts_out, + uint16_t nb_pkts_out, + uint16_t mtu_size, + struct rte_mempool *pool_direct, + struct rte_mempool *pool_indirect) +{ + struct rte_mbuf *in_seg = NULL; + struct ipv4_hdr *in_hdr; + uint32_t out_pkt_pos, in_seg_data_pos; + uint32_t more_in_segs; + uint16_t fragment_offset, flag_offset, frag_size; + + frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr)); + + /* Fragment size should be a multiply of 8. */ + RTE_IPV4_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0); + + /* Fragment size should be a multiply of 8. 
*/ + RTE_IPV4_FRAG_ASSERT(IPV4_MAX_FRAGS_PER_PACKET * frag_size >= + (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr))); + + in_hdr = (struct ipv4_hdr*) pkt_in->pkt.data; + flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset); + + /* If Don't Fragment flag is set */ + if (unlikely ((flag_offset & IPV4_HDR_DF_MASK) != 0)) + return (-ENOTSUP); + + /* Check that pkts_out is big enough to hold all fragments */ + if (unlikely (frag_size * nb_pkts_out < + (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr)))) + return (-EINVAL); + + in_seg = pkt_in; + in_seg_data_pos = sizeof(struct ipv4_hdr); + out_pkt_pos = 0; + fragment_offset = 0; + + more_in_segs = 1; + while (likely(more_in_segs)) { + struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL; + uint32_t more_out_segs; + struct ipv4_hdr *out_hdr; + + /* Allocate direct buffer */ + out_pkt = rte_pktmbuf_alloc(pool_direct); + if (unlikely(out_pkt == NULL)) { + __free_fragments(pkts_out, out_pkt_pos); + return (-ENOMEM); + } + + /* Reserve space for the IP header that will be built later */ + out_pkt->pkt.data_len = sizeof(struct ipv4_hdr); + out_pkt->pkt.pkt_len = sizeof(struct ipv4_hdr); + + out_seg_prev = out_pkt; + more_out_segs = 1; + while (likely(more_out_segs && more_in_segs)) { + struct rte_mbuf *out_seg = NULL; + uint32_t len; + + /* Allocate indirect buffer */ + out_seg = rte_pktmbuf_alloc(pool_indirect); + if (unlikely(out_seg == NULL)) { + rte_pktmbuf_free(out_pkt); + __free_fragments(pkts_out, out_pkt_pos); + return (-ENOMEM); + } + out_seg_prev->pkt.next = out_seg; + out_seg_prev = out_seg; + + /* Prepare indirect buffer */ + rte_pktmbuf_attach(out_seg, in_seg); + len = mtu_size - out_pkt->pkt.pkt_len; + if (len > (in_seg->pkt.data_len - in_seg_data_pos)) { + len = in_seg->pkt.data_len - in_seg_data_pos; + } + out_seg->pkt.data = (char*) in_seg->pkt.data + (uint16_t)in_seg_data_pos; + out_seg->pkt.data_len = (uint16_t)len; + out_pkt->pkt.pkt_len = (uint16_t)(len + + out_pkt->pkt.pkt_len); + out_pkt->pkt.nb_segs += 1; + in_seg_data_pos += len; + + /* Current output packet (i.e. fragment) done ? */ + if (unlikely(out_pkt->pkt.pkt_len >= mtu_size)) { + more_out_segs = 0; + } + + /* Current input segment done ? */ + if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) { + in_seg = in_seg->pkt.next; + in_seg_data_pos = 0; + + if (unlikely(in_seg == NULL)) { + more_in_segs = 0; + } + } + } + + /* Build the IP header */ + + out_hdr = (struct ipv4_hdr*) out_pkt->pkt.data; + + __fill_ipv4hdr_frag(out_hdr, in_hdr, + (uint16_t)out_pkt->pkt.pkt_len, + flag_offset, fragment_offset, more_in_segs); + + fragment_offset = (uint16_t)(fragment_offset + + out_pkt->pkt.pkt_len - sizeof(struct ipv4_hdr)); + + out_pkt->ol_flags |= PKT_TX_IP_CKSUM; + out_pkt->pkt.l3_len = sizeof(struct ipv4_hdr); + + /* Write the fragment to the output list */ + pkts_out[out_pkt_pos] = out_pkt; + out_pkt_pos ++; + } + + return (out_pkt_pos); +} + +#endif diff --git a/examples/ipv4_multicast/496632_IPv4_Multicast_Sample_App_Guide_Rev1.0.pdf b/examples/ipv4_multicast/496632_IPv4_Multicast_Sample_App_Guide_Rev1.0.pdf new file mode 100644 index 0000000000000000000000000000000000000000..197c85e142546104694d49ce86d7c22f4935a769 GIT binary patch literal 109644 zcmeFYWmH|uwl0bUcY=fjnYhEmCAho0+r*uTOK=In-Q9w_6D(M;KyY`55H#qW$y#gg zb>3a~ocrUo_x@}et&LGNdR6u6_4S%1U#CY(TDmw;={Hbua@d89Up#Kw1=auywVwcLA_7D>;MA ztW3cU&XD>A1ehhPY{4LBW(iwkFzAmW%+g}IeC(`1HXd(n}IxZfAyT5 z{a4D|;g|jp6;+t!K=u}3O8__5FMZs8_mT_9tm5DRhKR8J7Ey)R2f)V0$*kt-2x8V! 
z|W7Te=}p`j=?(zV{1l`HMAjz4ifdg^p9&!(C4 z-&74geYD(>IXMe)fgD@K^@rEEG}hrp-wTaMPHEh{U+81EE{k_tn=W@YTvA*nsN$gw zmb}FiS(`8{zo5%^#QxNe``uq?ICZk&DKO>X@Z~d~Yc^(?1rs;tuQM9U2rHHab^3BH zK7u3)OLX?^G;-`dy9`I)F6|RM*~GKsQV&gEC~};7$`0jKGOf}wj0KHY>u;I|)@7z` z9_rqZ;_+Zepom!K#LkI}=r{MBCt}agTrGH%kLg~@x&cR;%|DC>#6_hG17>GlrDkX) zd6ZA~0H5$!Ly8Ys1P;abu^$Zv-l;56eFA;pWKnmRM&~=ZU485<*WY{0n6o8!V*}(9 z)xS8T?sGBOV-o*FhV-cEk35gK2R`VDepV*_dByTlAK2SOCgUlr^BlZwoi^FV!r^}# z);m7i_-v+MF3@Ox+q5IA)evq2icM}+(o4r>GO`&T$!~A{`GxS#Sg9uWx(m9sVQKXN z7B(T~RQWfA%9||n0_uDJv`vPclnN?W2b^HC=GScoRPpQhL*lvO z1_`srZ-&t=tJgQ-nn34V?(KrDo3JZT;@EA;Uh!wYV)``Jjw6+_tef_#0bi8GAfCf4 z8IvB)Oms>%eP2=gtKXg&@krNL@s4iUcB=|9WR}RSEeW_}QwH(+K7X?BPIFhc%e>R) zec}TI14u+_i~DE$fa%9}0XZ@S(u}yyBJduM0>2Q+AT&Z%E+(y5dy;t`i?Yq>Pa*qZ*3EY+mtTFbC1G z7qp;?o?&ua?fM=ru&M8j?&kq2^Z3`uLy#`~-uv)@61|S+c@&3Ay$$2g`eSAesMwSN zaVtYhi)VxYWp9fR8jGQM)azEGn=$ZRgB|YDqwdtr&B#-IUX2age9>@asq49f|m*f+ZI~AojU4O@O;D!C-aj&FKinsJy61FC^{cduP%|^NTy=30k12 z-9dQ4C^7XR)G}Rx;G>>}?`P*s7*OKrkNEt;q*y6E3n8V;)v4_{o!;-aNeBR8w0@lJ$$v3iq_8Zfji8{D0rU2~3&Q+4g>zd28oER@rd7UQV#lhm91X3aS z!VEV2CKL1sbFP{Cb!eMq9~k|=$@ArWvjFozePjd#jl0)NgnZ6P%2~M;+Rc||{=o@@ zw1}5W@vF}rrij?xACC^^1w^NVS8-V4kpD- z0Oku5-v;nJ{OWpCzTg9tcRV!H7q|NYH8DFiNjiKP0utnEPAc_RK)^=2S^UKO1#&LB`s^*q@>tX#;xi zJ+JbqAPLlf?>(<}oOr$rnB4Iei#R|XMz;LHOU~m*%k2jo?m#+#n1B)bB7$H) zo)dMzLitNjNF!}#8Je!!;ZZU*(fY37-)XftnV&Wi9yZv%gg+S3)8of}w!snZaDL^2 zL;f^#bq;VU|4yszeqZc+4F~ad`g);kH>7z|+cPWpn@UFm89w+>Y!4HC_8yJ~SAd+( z3mEB}>W9B_N)`a_&e)U01-GHp7fjYOgE&kgkwJS%#mfnOfiY1CZZ*wg!<-(wlm$S? z6v;0`aJ)|eP9WPMytMb6lrm6dBPlcNf2Y+V`4b2LCihv?;1m3&)vgFp52EZj!P*K( zg1Q`IID&4NL0XUx2=|v%B#C|XNbiYN<;i)c)q27Rnu$k9(#K6noCaXph8yYkLi9%x z&s6!m=O~5MVYYnBgN3D%A=~3S=s3KSf75D%VBPh9SG~&SL`nUPR(r$_TX&!)#1xlY zh0j`%T-AbC#GwyfFGy_zmaOS73@M`r4gd$uFg&9Ah`0yM1=^@MdaQ)uW(-HvJ%s`k zVhm?_?uQ6`5vUSnVA9>kuk{u$HN6B4)z8SlQ&fBydyyHqUx~?-;7eIfIh+S zu$N#`pkzeBt;Vw>;n4W$zL6Hn#P+3p;dCc6w-cFkEXAh)3n30gn@A0blIFJ!D_96N zz?}3>tIhcxr2IRrc8W~A1^8@hLI6#Q`2bUr>b>pt5gLi1vda;$Oq|CGEmRyAcz#>R zo8m&u;oD3u(#EhFiC8}bN)N^!HHZ<%wlP;85sHPQ$iR5$8Mzd^==2W-tg-N^(33@} zn8;`0f*6vUdX060lZdKebKtFlc`J#$m>>eNrUH0C`;vcy)dmR-t|0~#?1>_d_4^h7 zHex69a1$1@T!50VwK6x>3FNV72K>@IH4MlYDBfYUh){6f-(j`BQ$kn>jdrVP67m3E zK2bfXxIN8r0o&Eg<)f*~Bco(f<^r`Y-j}|!$P_70F`h4&l1#q~UtK^4#^7LmM@Uie z&?XnyCHGR;zaC=sie3v>wX;k5ieQFOFc~K$VRTHCEfOr0rBdY}cRf&I5Et(AV zEZ9N%2JIMo?UHTmZwtf{%w(B~{seBPEAZ8d>70I|E}&9%*VJTRtD|LaPm_a-2;9;k)WMct*;Znl-f zOGD?Qe`nQJLuq?Ucp7eR%tp!EYPRA-k&DxmkQcGMYnUf z;FCk>g%wAo8Y(Hz>Qi^8uBl^%Zw9!a;1Umi zx_H4xgX7Vc;7L*jBj+WzM-NhYGA`C)bcfVo@1YO=-ZOiX;ZtQlamWs{1rba<_mGVQnZ;^)bNGCl_g|biGQJ1nPc4+$!@+Obm^EppaH@# z;=TPrd6+iVK|w{uyp4awT0*vVf+G*4T?-7k^46D`GTp>^irFLWRLw@6k`DAWmFtaZ zc1W|;d(+WJAxNt9V6C}`gwq%zhVi`3@@b%gZ}u+7 zHS($!@^q1zN$aBRuQHIk2_lKrIfRS^=`q|2-mas9-xpb@fu&8;)T-__uj0B6Wr|J) zcHLZGw#1+jMhZkt6c*`$h=2=ZIC<8|fwTiANI!x+Jokbu8S2~*B9&a|rZ4YFFoeL% zg*etUJI)DClQ&t%Km0|hxM&rf%tVTYy_fe2aV>bj@o}3N)~8KSYiO2#`DSLkDJ4Vr-Y*Xv(>aVEp5(t@cp*~jf=;Z2)@OayXi?~f`InZ zCYgj$^{SuE*QiW4ua&zEdav7vA`#+lJ+1?0U9ktWrjJZ9J4$&x7>i6{Fe+c{pR22a zK1Y_67S7C<46eA7#7OS_c$mAYYi?G5y&8Eqd}8}PLA2NzWPO`$S`BJB9IsY!SluY-vVSlVt`J?inW9=H|^& zTJrp64mmqOg4T6ZJ9}2%K+j1y(?#e3$JRrG#b&W6|84x@aW`P+{X^EB#mBC698I!h z-qFukKTn1F181T!%3ur=z@DhJ5iyWV_; z=td)|K<~(WXd%cPopMpt3{y5gXwQ1zj!K2YZSTP4YeO^<&@>iYRP5?8jBEj9&{sD1V==zPcn+Qt@qDU5?4pvv(<9C+q;BKR6gZeC zsN}jiR>m@}WQ42&pFSRa=A9<35OJy(I)HXn_5)@C0_M8JEKKXhmz%J9!V4~%O(&xM zbbRnnPMJ15ru2$mCo;DcxUdE00u^)9QC3x_V1<4@j>L87fjY9T=1OMVocamv7S9Pc zQ%VmAW(qBOZF${|TT*qnnnNIP`pSPnK?tM-AjqR=aM7TQ$vzYC*pYbtI~P#IO7 z4x@vkFbwx?*A_XRiEz!Ezjsq0Y9AQlaaMK>P6&H~`V1`;u}7@4B5yupcBdHW;~W(6 
zayS`QK7X2gsQAOzb>Y5#W>t_yIpw(rSdKX?afcy$+o#p^SLwl816X{=l%Qv(LoxAV zwx7ah8$#kJT~rFAmjpx6362$6!YL{5){SrGrsnn#(6EK9E{23b^C(;F3>VZ1uF_Kz zwtyWW7Yyp3S+rV1aU4`K`;77Zk%UE_%C|y3Q3;Ys`by}>Y@U8EzCz0tzLPk8k)H~V zO&aNULzDN_goY2J5u&d8{>v$yYCBW@I`4|R6-3xwFk&{I1i9Be&oIP!KKD534x*&ue@)nN)AgvHpT@CmDFO>g6VTk%z5h** zzABn3%WgJV^GcjfhQe%xfT?zcytaoXcXzqM5UM3+XF355^-+&i%W8teoSQd!lN*=(zFElShU@qwZmvmJ`?QVSCnMSz(3%toW7(cZ`6JS_Y(sr7dVZT= zcjP-J^6}PrXBdQdbT5XPIj9#su#jvi&Im<{n(K1Nd<<2%AUi>HtxS#AUdWqB8B{?1 zYdrhQm#6zG{a=#gj{_qlzMMuQr!$GIf^tDQ4_JHC^3cEe%r4tni;@S4doHDW5R zt?dzy&JrF}u2pR|zyYBe5qS>Z@Rl0nUN!7}kIN*noUEeI1IsL<;!~r&rf>GJ2$9J4 z*56^yC)Z>|HLGj3Ykg69$GZ)OfIOB+7Wz#G@NC3;NR_&vFHme~cj~$80wn_`*80lu z3@bhnwGtuWKcM7<+}-vl%~SEeaG}ka0$_Z?UMa>%I4qehyV3m$5Uj~^hP;)H^N``h zy$_LlO7huBabH`24RNtyTje(K_R@Hj3DdamHQq~ZEEyR6Li&gBGkOU|FszlN{!#BE z1?KSV&1a&Rrp`~o*~EGnpR$$gY^cQzpw1$IAFszmvu4mH*p_FxrjWDoO4zQTk5cK? z91crOWrXOazOJNa{+!)AQ0_d)>@5ML5VBr#EcR>h$E_K=wjSri9LC~UL4Dhx6kKh0 zL}1Gk$rVMUylBcJA0fj-r#p5!C~Uu7sbWRoYio4i%l?cIn)ec%&Y;rjmBTm9Jde<<*wz=r}K3VbN=p}_yb_5JJW*8lAK{x=z1hQDQS z-y3stq`j~T2Ia-ba)KR%VB`io6{8Ao#<$)r7oN8_!xRY03?_Vn>XO4v9F@9ziX1&I zfv`A;?*`M~_EszQrmx|0J0|Dv^&n}04lSsPK#xMVeu=zX3EZo)(^x|xzqOc8|^`HISBtbSt*x6*{8WL z$LzI?E&Yzn%pxa+R8frL&PT?NNS12^-`#rM_W*LpT~7=brBna_Q-7ZQ*jtZuXH(E8 zGOn1JG>^)wkc~pSVvceqT{OQon6+(+>L}r+yn5;_hCAqsG67n@6xU^WbY!#A(@HyW%ELO@6 z<5z}`N!Id3rxmM8#k3Q>%48%gXF}A7pHvB|eRd9GkgSApd0Xm8SUl9~FGVc%zTNJ3 z*ry_-@V+os%y0KM>!iy^Do?`Vp+4x{{mrN7)-VHffgr3?H#6JN)d`C7Bg9LkQ3xF2& zArr!Rr*Dx(V@(G-ffS6RTwEx)+2$btfx3AjPH@g7;Gzl=HxCu3@s7hV7Wle$>naDX zF>@$EBm?4)^M2mfg#kI&`HGaVI0Xafwk{UTKEIFRCAO3E&i7OdzWkBMPhE7aKx!tP z=WYBzrhpNQLrnXN`oS<5CTwzRcgl?Vi&Y;cH$T>B?xwA8IU!XnUc7cj3hX9ibQ7T- zq_!}3?sEv4J8~A|P#8aAKRI9S=l5p5d|H`7HER3uNg0~)FBH_6P*A4c$r~cF!MpKt zAxJwLbsM&1hJ<_F(3n%Z? zx#?`rtPMfzH;A?nJ$};3L_V`P?iU#v6C}Va(gF1A2iS8S!UcRWCLV=3z7t+Vc&+ZH zu#6&{f23T!4?$-Z_pxzcvfb86sQ7>ZeTu#qH_4}SO%$8h>E+i zX#otMGA*!`s~O#&(=V@`gk{)r9qm$$LysI&F@lk2q-z~UVS(T4b4MyWus!on$|*2q zNVV}#dJ|8b_uystJwj2ylSV51p`3=DITLJ$nV<+4szD@(J$?E;ecLs<+JLIqrngUV%x31QDMq$Moo2OJc|gm+yGbey z+N(wg7C(_FVG#ci#D>FjL{I!YSRhnN&+R5~atm+pj`?KZ(xJxJp`K;B=7TgLgSSyx z&nP#Q>G8HIFTyM&clim*0vfWkAk%TxSSA?zi+kSRL+csyW~32h53o^z*jr52lbIGW z-m9v^qpN3)chcxrYEL%pzSjeEFIy)~*I;){7QB5Dm`EVs=1lD`Ax%L6l5|A^tGD%* zrv=uhH0&9In-T%K)|G4h@G4vU^frCT38&oYJ?ivG#`Den=_l1@c8e!^XjY?%o6Rp3 z(0fVK9k_yD%KiG3jlG99PWic}GovN+bG_Ip0 znBF;``q`fqWDNpNdKaz?RBARC^s1n2)w;CuE!g*B7dEOt$&j61e`QqxKPoIatVE)N zZ>m#zVzJ^e49L3!j0Z>R4hQLij%woH2Ym;9%Dd8C|rfcPXX&T)B@#=wvK*-FKa=? z@)x-ZMU869Zr$s|w`)^vD)rB^>OvCi`TF>cDI@MOk0gJr@MFFvR zM|&fLY3%w)EzeIs3GrD_TlW`p|8l_(I%T_8G40O_2O@Li&41EYy7rs@#2G=Rn%mMp zd$xdzodKK|=h^ZVVUr$G{X)L@*}rscn!H%Ehpk$bOD0>!n}^u{*3WFM+U>hP3W#!D zT!t7KfSoiYhnGJxUo96tEM_bpQQYz4>HuuBe>Z8CV~LY$VDd4ei2SMGut zu6aojmj#^1o!7iJ7}~Be!vX>4&$c{+oM7RfD`akm)=Bg-3B;~bv}Ohj)(?t?AqI#S zp$Vbv@;Ot$P~m;|1nQ`2HRQ6dqo?BM{o81Ad13OY8<0Xtr_pbqjMfJ&mjM+hA8;su zFa+*8ZVR;SFQo>yq~-k>I-*1qHcXdSCcXshKT2s^t82RVtfHcg>K77z&G-UoZYP~1 zx>DgZ^P?v8#$po_-EX4B9$*W`|K~MHIq)oU(GUF>6wR6u8Wr$aX>zABE?k8I99R@< zIZi~{3L%_s+zWgv7NzmJC5iCMo1nR^6^m!3+Z_F8Bse*oX(~HUfB-%i7C)dLcdd_c zHT%!rS0+xYjhB9sv+2{PhV3r8D=0#av%m*p8$Qc8<%Yl!g(kTPFSg880z!7fGg=#! 
z%}pH0Gya7sKC|rJ1zvSH_e1NBTPmPR#=nGPlba$O+9(BuJsJru|6 zl+{gz3|Y4Nz#hjX=eXK zegHy(-vG$05hm}(!wp7n{&BHCqVIgMa!2=*R>$ql<>K>l6I+GcmQpk(l~WahKHD47 zU{vuMZO29U4nIg4oZTw7+wCsnkMo_pPI=D9aQ$PLurjW(gWGFOBJUpsv4z9qcY1VZ zF`S%eawyYub8=}|%~CS3PdN^OH(uhJ3DY>xt-H2G+4!gIuUh+MDr@k8GkO-eUafn( zC%rY%QtVuggbdo5r})p~R%YKUgl#4ou%jRc;K!3Pz_Lo?UI1Ahx}vhX$A|HXoY74d zJWrOouuwtc4!>TAWHpp$Hs#5fDr7n5aP9KL>qu_jI0E;6b8{tkow%KeM*luea(X6x z_Ff1-dMRB8DQTj!O9Kf zto^DRUnr36gtAD4o$?A4Z`JOg97k1Yk-k!m{4!tL&@HUBdQ9xlIzXV+c)gwzCO_R| z%<=qHdP=$>9b#WzH>IT3X^QbsQ365ma96 zfv2#e$A$ORhF64aAQisEt$nf(w#@__ zG>M372(Jewr4EE-Oli0C<0x6MJ6SD-<$0^Y=sY}sD7n-kMI7V;}3i6Al0NGq1)#<0eRdAc(KTC5B&O-4A zpNU1Wy*i-l0^mV24E?j2>QDVQ|L)cO!KM6Hq%=AHsNVfcukNqYv45ZR7?{~u{zFPr zMba9Z4Wac)<(wQrk8K0GQ#XcBxlE z#DxaY8(2(26nCqVh7Wg;;tcRs-BogN0-i1rjwk@!kU&x4ewIpQgK+854-+z0&CD_& zD$(>PDZ>|CR|W&|)ooEupnJqe9F4=6!%Nj2#@hI_SGM1kJoVKL6oAOjj(=Sj>U7BZ zx*+bRkvsA`<|`4@MC7Wmz@^a`~Eyd3XpbZoO3b-{w*oe2?attdcugKjm|Hy*-n1XiJOwov32S`E}pgO zx!=b5+<88(Fk_4=OCiUe$*1e3I8rN^o=_WZX@^CZ0#nXF1Bs8OQ=q^;UB{=Ip2{at zqzUMk9r(crK{V(r_0K>Pa1OHR9yIzd*Ahl-zG0nD(1?gD8v+F8)BED$(okXDyBK!K zmEqIcbGM3Fskue--S{@BKBf8~-#M;#rLRzFYg@Pa$F5aqr-o4Lcw@H?IqkiVs_E0W zx=R$OKN+ecM342!H?jeY^|+JMuuV}%%3R?9?Tp~*MZRW;c~kESkG*h-CBcz`Y_R)o z!kQuuT2@UM2vsvg4y=6wgCm$&yiZe1^QtXiiuz<1YTfQbGzBC_AE#~Do2Dc0b&qSpWZw1nr7!!>i`q?5}vJTlEN;-Ul z(?UO0hCf77p*AqOq=^8cgNmI+> z$z4BvOPd0Qd$eLcLhHI&`*b)5IAq%%*|GM;vFf!uO6S3g&f(PV*26)>)`9k0Ll3kf zTb)L;nwh=F_}R@XC*o*^k$ z2dDR#9?>|~WDJKd+Al=y08sEz^Z)Fh(Vr>a%9}0XZ@V{8_{dFnWe_rr0&`IMn{#Rve)Zq+pKJRF(_uKM)5ljUH)lBsY zeZ^`Eg@kz&1!19Z+1)LZPo7%J=;}yN#<9kgfVJ`;>wC9r%QJAEKtpNk={1T@!NbCV zf-uvJv#T&S6EIwK7@2`MR8o&pjdqiV6oNy9TnZ0Z$s@p9Kr9pYeYncFUEFvbYD-YP zQC7Wx5+7we-gC}jyL21>Y@qs6&RM|9%G$wRgX3?zJ$#Peqn12A$KU$c@Hu{e-}}|y zD%J2ge&4?@kI(R4RrSZ?@B1!(U;k~F_x|{MXZZX2kE0mq|2T_*{*S{L=>Is4f&P!< z80i0g9>d?yWBB`d41Yh5;cw?@(f#}W@sDb^|BwCSKcWFf_SWC*42ynsAbm9)kYg=@Vl`(QXtZRFswyaR}ak{()9*kGkS0_CZ+S1Y}95sX-)1&`Y7q z4SYpOY~X^Jk;pi60_>QsU&zIDH9hI(?t_BQg;#uJC)?}E^NZ|Y9DM*Z}%LT%z%OXuBr<~1>3crJN>PLnh~t%*`oIhXSN04fZUjRqgsXvmEvG*R zD-N*Q@0Y_cTQ4ZZnkV;C)G-61fugDk1Nt0|j@-;&dQeyw2O2vN`7+Rwi8%?HScNtU#6m^wtieuHHB?R3#Rv9T%N77b6i^C)cLd=h}XUY1|UK|(**3E`!ys?1p zcPKzQq4}o={js5Z(3TuXm0&2tvSJ{;j}O8v+;|5D#JY~P(7=haKmHG!NC=$f#sk1y zCJO#M8}fowQ80bAop!rOV@6cH__K0)6b`Y942AkGLLnt?+SUP1v#Ou$sQ_m45+l_> z#r;q`kt_J+HrY;g18!nYc6104u^v{AR+qn|7NAhPgdZqik>J}>8;2d@VleeM%5hi2 zKqLhP1QIihVF<}Zs!YlX_~8VoH}`2OYX5TTQ&b+Q7 z<|G-NNW$!z-L|~r>YTKgL?P#lNjKO>ryCofd==%1#Z&%++PjZj^5;BPMSm8WEWML{2&Bfw2o>b2Hk4-qKahpF+mjDs z^1!_37`|15N+I$na9|03-hlImh`1~-@Jyz4(6N`xM(d z)$ZII5z<-B1OsHoV%oSjc%Q-7q#Co3E)j7WK}BOyf;tt1uEFW~%}o>&H@vgrwo;u#_= z-W(kx>oH$L2vR!N-38x*l1FHQ^|=eg4U+Ztrs6Oa+oEs;om35rnE30i=0jWQUqkhEc!{>$1DdMiIY#y8@2uJiq3sS)$9!yB)~lRHH_))VGz(e1l9) zlWLcrmfTo33&3idmuj&qpqa+bE(VGh5e2bP{qakCZmc#yiB9_aQpn9iVdhfaDr-q8 z{E?B6)Y^09J)0SeNKxlhN7gU}Ep-SJnz`67La&0_CEe3A>G`^6zI&hfaest|XP=lT^SEiE8#Y42jB? zGX+>nD}tF-Rp-RFXRZkVM+>cG+pDs!9nGPM`7Y74*!LHcIZMC-5qbRP(8TXwcIQ}6 zkp`Ofk%b@2Ef=6czY^mkTD94aZ+1eIaChv!J*{4~wfBS?36|a5rP1#p=Ya1x+(^Xl z4@1gseTD~aZG3lp*785Z0rp1b&KV8q#u8jO1*=QPp@!yw9Kzz{I0cGbW>26@n@90? 
zM0mbgvJe?ClmsQwJ5Aohugut?5bC#xkz6D`BYJZAdWNij_C|~naR473+cFH+&e7ov zJNTTG!>l}wX?Err6s6^j{&|j$>(%#AJ&CIppYIN|$_EQ>?1~-N-)-{y8@NYAC<6Q?}3Xiv5UE!$En zohDy8uuFxmdk3dKh0i={cde}TwGji7{M{^YrIPxrlWQD=Ne28MOoN`v+gI}-!o0Wx zpL-~7RZ#esBc0i@JMX;F_$gBzE_n^wD<2WMi=!+2T8~qfb{+|m^RzcmrEYGF9yvI| zCY3;!mmU)dm|4@~WeG#ro%k_}-VDx;;fJQ@>d}@w%{S?V~Nwe~t+g zEKiZJ!?2EsnB{n95<$5A2ua_6!5miQ&fk*+-K=saJwnB;&wu1IGJW?14@mX$yt;;w zk(`{p?S(LmH5JYn#XEfa@dqpx=5&qMq2I(~RP=Sq zJx260!Il(m5FrQchUP;#I@MOwpD17C%k9}aeHI$_xP22kKKGWyr={cx#Q=nTfu#Dc zUgmystNg`||6<2~vE#qk@n7ut&y@I2=f4<{UkLm{;1>eF5cq|_F9iPY*>RR19PNKK ze`RN9{KpK070C939{*c@ygo_RdJBLO`Wz9{imgdz`3>}QwQbW!o80M-A^6KGVYL)U zF<33nZ_JNV=L0&85`xMn2E5F8CTYcK6T_)Vd9y%>M864nzk)Qx561}+r5B(Hm{maT z#wE1gU{$ae0TFG(3{VRsRe_mCm+^E{0LT79TqVXWx>|^^EQ_mzBqN*Tg<$Crgu@BS{S;$ZbZg66ZzBo>9f`M(!?RTq)7Sl?tcbfV+SIv`Vf`!r1*cO=LkP}vK1f$&F1oq9(v@D4<klW}pQh7eCx4?hAUr(u564o0;fFk9C;qD^{Y z4F@6e)-Vl1{1`NBkIWt3(VC!TlTPs_oOj*JBjt+z6tN<|pkpvE(zut<;oe8c&XFSE za&jTWeKPI$9DeVJ(#MJ_kggam*7t)!W4>x7e;%v)ROXcBX{HJ~(rLL{x?t{9IhURp znq{J0dtt)dm#z24GDA1%YmtYi&+FL(TKff83EL+L@jM>>^XFE5tz=!LF`BtE*UFH$ zZ8zB<;2wS09s% z9b44M%gkfrD}L7?d7E-|>7nK0HC;gegziF>Kz~fU?=BjPTzioN1=qLHBhXieSosPs zl8sZ0*Mn(0fszUGI@aoZQ~`o#!DF@GaNm7ZArK-Z>PFP~OUSLuvw{pI?-yHp^lW5K zVJ{Zc5_08ggw>4TDuuJG64I@*c~g2r8cMUOFcYPV7F}3Mb>iG=%-{yp_gD$rV57q9 zF4N^%jsO=G=9d^YEHs<%OiyI5G`c*j z1x7r7v^LTOO6A~ zP<3)x;jGwz!~-JDd^uP&Tknr^V>nFXc(b4?Q9juSBC@CXuf8%5<$CHCwJQhLik_}D z6=Va%PxMzKNd`1$S^N!=FkNLLS<3~wO z2q_eA;-WcvCQU+Gl{w8x7&{o7b;k0huyBNqpGk+%htRF?_5NkVKE}cF@VWy(tw{=L zRCr2{nA&@@f5MsxrRt;XoLlR zdx|Ga!1GcZr+}Y2o;Z?o!uc6VsVE3mkOL{#16(WEY02i>Jc=!X$yfKV0AY!Gwl`F& z<~g>9GA3d8TX^T|1NHv#Qj>(WJI7^CT}%%-I=6Xg2u_O)#U!D?o$ss9^Wm z9c-&Q4>wjDQHX=rbIN`FoyVK|EsvHtKf4RCnUNDLd$q-7qHlLBk0Ub6IKIBK#g~y! zK0bZ(k4GsfJ;_vK(F;$t&vCL2Bf+5gqBNR)7$0ut%Asw|$CP0bZ3k>`Ixg?VM{g$D z8n_8(fU&5OicL%Q!;c%&N%Gn}nRvbp1nZl-J}umD`m)LUmn5USI@F(G03hUeam#=8 zYxI)~%r9Q`7q9w@SN+AS{^C`C;!=M;{WX{Vg}^Taej)G+fnNyxLg4=eulk>z-2Y*9 z%<>1V3S?pWFILBJJwL3D^YAz#rhSaTKZBKX>eev@3NawOqx#G+H1Z00tvaG+2eum*{QfFI-be zUo)pR-x8U%M^TQC%8SyPzL}z=Z2~>o%FjQ|B;CA?z|XDmyGJ(wd58yC{nd=>CqGUx zOB)wcCq^+FLl;vKQ)7D*(?8nC$_@lFD!UrFcsiK=W?l^x{@K1(MFUy`Lw#-)(%j58 z@|5G<@qQFV@;rA=9;rq(q$4%h>q&^hw-JHFBBCPYCDilPpsz(03&OkgrgQSpXkQr{ zz3pO5|7yu&Ji7b*)S9~e+_ATiwP$$!-g%67kN3`?lnMXLSFN@08g{O?o6TkFYt?~?b%llQ8V_Xd)SeTSxUKR7$M7)Z=5Tu7&ch%lTS zVqszs;Gq+=8fZ$KcUJ0Vk&(7|Y49+f9HQ2;kh8_KKXQvitK~OL0$w2&Pt>309N1JG zR8D=f|KMoCvc;MXo5gDQDw>Knvro#nqGpb6{II^YUcKH=1F<39wqgGD!lC~R=TYTs z<*a7KV&khREBH%T=5)>>9#hux;TZEK2j7nnr!k{ydzIX!-V5p0yv43gM=i7SW=kF6h>J7tB!!bF+GcfX)h}@LlK$bV-26OLvN%Mrwr>Ah!khP zr%oT?Y{hN))%Qu9ZtExSk0q(T9g;SEQ`5Hw;mgaps?D4dIp4~`ZS&!@VlpeE@#69w zVPEv}SJ^tfP{~jBgAMalQSX6JdUYv^BKmRC}`y< zvZdz?`N1AJUxCyw-pxc)=JTaNpx_t=ex?*=UXUY(TD*ikbkz-nRvD5c3)&!dp!x5F^^%pG%2biOPDl(XY7h22%F z<9?(T6!L6q8Ko!AMRm;kgGG9e}Xc3Sxi-8qU32l_Ja1cA9{ zH=PT60C%xCXro0JVrKcq+PL2S7}pk7QNTZL^A5_WgHTr(5>O2B1eZ<#}wfw4kmjg%b*rFGs54*-;9}GNIp?l4nX zh^c!9)u!xZz{5?>?F@ak9uQfgZth#-*O8sLtsmb@SJ9E>CE;_`j)jv(xWyY# zHEO-V$$KoN&9weV%LHP;5|(ix--rFRB@dIgDJh+HT^(l?jp8Ixu^xpD!s%dvy^~{O zCb4+lvC_}Urv!peLGIlA({NIyp`8AQkpWacHrec5rm4E6sI>3Sn4F@9YSU{Pp31?F zm^rO9N9yPr!ku-a=Zg?w?bjtcYZqlLTD+t>PCJxiI+&+q+{11}F}~6Jux!lPk`Jv- zH)oHNJdZ%F=<^%oxAE)&jfXp1P6SoE$$G<;^PgyZ>r&%dY19{!l4^j;yKA1Q6_8`+ zmcBf@7;62Jr3?DI_8jG{sqQ;6K^l659p}}VnAy0k!zE+U%5`tA)zK4mYt)#h={9OV zV!Hu-;Z5=HQ|r>a)H+&_j1GzGV`{Yk0ntSvluv6`a0pm>!*qJ7^cPi;Hz`DZMxSt} zqxuMq&9749I@W#4d!M~MzZT?>^uH8U>12ZI=~VoUu}(zaKjO1 z-eilWoc8Aj<|{|F`gLRytv=LOm6wt@kW#FTBv&qg)i&JI=%8OyWM07)+SWA#c3r-F z$)`{vAI*C>5^6o=`oND_fE7D@~*;A6&P@Lm<|08 
z9rI}NNI^3;P*a%uU7IY<#LU700H#!4XgY9H8_3$>JveW$WZ2Zvud%uGI;M214~N9z zI5`9X3FHCiXVXsmv^*~S?6yt49~RQ6Zg%h|Pk3v$Oag#@x_X0#2YRVn*N&v8Dun6a zSr`@uckWmEC$IS_h`1Vyo972UfLmJSE9Gr&7M7fQ;ysAkAPbJoKjcMS?KsWcPofZ! zJ)g1=qLHeL(#a*F2??_^pg4lu3Nkj35)Q=rhDJ2{!M^Fnk3`XzFC|m+mFu9S;W|zo57dwv0`O zdwocMQUs;NF$}>!{)S9}%s*%<|1_-dBrcMT9>eibpGvDYImLFAhwS;y`i66=naV62 z;Rjx$UCAloJ}S;gi$bWUVLtvklJ!6zCN952hkS~+m#`4yc@o>2(1jFi{0BLT)3*F@M_I0E*fPT5c%imWi}hB2z? z!vR|u10)vWk5RWnV=yrOP8ix8T>+hfdGwU8wX6i-g0vflq^&SaTa%P_vUO-AVSt z0r{430~R>Lyq-!tXJ2n4uT6#d9+$QhCY|E+>qB(mMthW6oW%nR2hVmp?VgX9B-ZJ5 zUB;4cKLjK}csp4{j@-hsFwcu~+9Q3-kBd*UT9?D}N?QV&(tT4{0&4DdYSMqMy}iW_ z7@F=!XU*94j{dsJlh89|^*ShsUbK*;2z9z-N`u2#uV-1cPIHK&CBj1bEm!!WZXk0v zEfGwoKqd!%*mÜYIESVn|Dn2=duD~iLXT9(eu-@kZ9vvYR6mX9bgG;Mvb;%h( z=e3Rs5mDL!UzT-lGm3Z2Xn3kGz(7%7!3WLVxMAkDf_pdqcEx<`73LDfkx_!W#40ND ztatc;0G6&nOX{|FCeuYDMdI5D0kw?lgc6R1cQ}KG7WFYGX?jl@RK9suycxJ zNVuH+tNMDI*&N;xas2+guM{sSwFdQv&J}?`Mc@W3=OrJfk|$}gR5_h)<9+!scJav< zxiKv1h#UODgp?F!Nk{2+9nv>X05q*SDz5H@UrjM`Wn&Hp7@trzlSgjyL5P_mhi+`kTXdR3 zK^)3=qI+P-5(#n$Qt2)_O$L6)HJ5!yh}du5?Z>At-d3ZfF-{?{b|I8C$Xk(qd@!i8 zu}Afmd0uXR?)m(EZqUbYa0@WLy{6!@@S*ARXWZgZopIy)`bTpg3O=Pvg)W-l{Ps}? zQ1y&iwFnn&i>o?Vt>+O}RR|rrY&oPu%p`P!N>BDt3YKDqdU#t50BU{i<3?Z`9Z#MZ zDt$w{WZiWOoZ~j`xBI#E?h;UShvTOh)z0U2RAG?Qr1? zu-v*=eGd`< zvAgs_pVSebo^I@KufBVL^pJ#q_Dnp$f%#1Auy^)V77H!CFPFA}`9T*4EN<8}@N`fW z%QMp-Dz$T&3MXgTzz{}^$%k34+r_L5ti}Wc-q(QPTkprHM}A!$gm%>euul~;-1GsS zw9)u(#Xgs~<;Lxl`yZB#3`yUcJFvJZ_>V^^20)EEyuZtE)Igt}fa0-%kAsWp)y^C& zA(4spL>XnCIO<4cZeoH4ezQB1=9?eCQa{+7@jcKBMP@S~-oG|Sn>kp=JRQkBg#_M$3%{PgXim#37)ZuBBh^e}v!L_ERjFB%DEYS!_)OJh^heD@z#~~+xL@$@|nkwh!!?>*pa*q#2*p4YOko6-l1&p*7vv)u@nsri3X4?d=9oDD@z(9JexySEVXpO7)6iL$MBX{%Q4Z;g zt$WSdvHEY^uc-F5Cgm+ZHfo`*c8`V8O!3qjFekKPxf8jfuL$kkNxTcH{pt%(XA}-@ zvoWeVlfoXpwmYn7BIEKv9gs0#T@4f%wGBBRd8j&98Nu%C!?fj?HyAA*%wTJ;vkDT# zBA6Bl6H|+QPszHMzo6dKqcyDZ$ZzIzDeYXCbH9%G@6hmwZ+CH4<@S^A%es zKQptA-qGiR%qocwSeFrvrUc1H@G)7>v$yI%^0Ftcv`#G2aop~bh<3R>PKM^!)vwow zgSE5IUn79=UZPh7D1FJJbu(1i$aZ!MU~HB)MR-0P&a>}V85^e|I(R!O?2wqplNFH< zi;*;hiDNw~meQe&qF~-ENf&5NeK4KoSKi)8vHDI}`#{IZ^&FLkmha6F+yoVejbXjd zYno33;+aIa1i186wQDP~cPD&KT`pA!=Z>5{yu$k1+Gx+)h_#nDj7^R=X&k-;c7o5NCSTZbskz9m%G zjK2$-=tyF&6REQHSWU9cqtfn*9M@e#N^*uaE7U4{aUpMkTAb&`|FlGbb=r#Kx>J7b zTYg^gv%Gz$Tpn7^%|7fv9>-&U|A>3G05pzbu$Mf14D30Co`%CiB&;54MAvH#(o8My z*B8OP{BO{=&XcU(0p!_WereIcJmC@19MbxeveXsf9=7fj&-1RNd2G!IxY?`dH2ZM-@Xe@ zWm;H)5atbP{^a@I+iWVJ|FgA-joEMj#K+kx%cC|Gg6BM*3QA^1ktW-fUDZ-C^Li~j zE?rKCLHU-vbq%F1`VW%03a>v9}3i$Q86Sz;4H(s^&EKWdaR+GJ0Ocmfa`~t-8lk@HBnS5Tk{9 zYPklIQ2g*fLKG=qKFi#8g`LXJrt&@#;{9R6yn%{?%2<7Aw)+mqTPk>7XZ#9UGO>ZY z1zieO_w}D$N}Mv%7mZh7288HT8nPPfs)`!n!$IR34)PH4I{uGd7=(_h3ce6w-H}b$P}R+Yf9HAByk~$8zP zuUIxQQ+RbbA@ltx@TG4!FvEpEL?X z9wKMY!y~f!)$Sg~%AOaw)@oS`;8-)QCkD3+%!A!@3>Uu?fLU5LcqgBkWp(RHacAwD z^}=SJQ5Hkj6*@YzAziX=YK+p8PmswfH6Mjp5T4~>Mk0ftfY!-rF zdNM`qD2W#GyGBt1U~Bp+H7+(oe>d%^)5%Qj5|Y}j!rhG@!M7A%EUV&vta>p*Z7vw# z`YV$9KgR+2!vK^O!1IxpeGt{S<4p@x>45YrKPqz1SjU`oRqAezcB^BgUW5fxS&q%AVo_E=Mb zSJSTLof8vUYhoaK-QV?RqjnoT+pDj}*6 zaFdm2^kxH$h~evo5m9JHashXvrtBB*I_L7Vg`%WZIwbStp0s3R*%a9#3b$6viWKIh z`q^|_*Ea%n&l&Fw4~$$8(8qwV2z8-`XquFoL!Wv8P+kmTJr=m^AI#}(GR(>-g{N|H zCu0mjN^yuyG9?*I`uZ6$nfRk({Un7cKf_DIr*&OiFhod0 zEAqcmkQXe;8Q!6N-X0%8HH~Y>qFWa71+8)DYWJvlB72qpq-of3Ag#AK?jSJ_pf7fm zzX~Ic{+^UP)hwtV1+_`6nj_ogCAFvTPj*2xsB5#Y+<8SdsBRCjjuXDkTX}g`(RbE} zo@L#HL$o)nMbRMc7_EOp&`$Z@!vUc9dVR?D!1U~Gucl1kwL}T`MhmV>R>~1lHd~(0 zTw4B>YM;C3G-B0Jqn(zuGz$HJxK=9>%eQY{Qb=slPmod=ptp^AKmYK%d` z{%0@Pt$oP4odq+N>wh;avH>uj5tBcEa8oO)J0yqioCoBt|tuA`~IvZD*6M{#>;l 
zx!`4Z=&d5^!1RPBic4W}?iGBQ5R@OvG1!P4<#9U!5O$7;zkTMWzL?#4$xy3eQ8zA& zK*IbHm%r!O7C|>c?WB#QfOq@Yp5B{Zk37L`mFIg+f!tJzS}Q+47R(bPF9lS00(Rb8 z8Olci4HzPPs{7=J-mFh(}cO%uuh20Z#3JP9m&NF^W+TRwIFrD z3S*Z0%6bF2LBfAslRPzS%UBeQa^pPHhdStOVmx(P@ya3Gun^{KSp#UQh*H;}sfVu( zyYj1JVt)Q?yeAO&kn{DM0}ZF;}t^3^`z} zn>7CzYgepLlq#*n^gW^vD?&o+CfYN1E-sz{uGq~qH-MUWY$Cpg0O=jC}5;fIVaSZCG=3evUR`LDu}hc=Fw z*i>y%u}2$6N!}Q|K4gqj(ET1T8j<`7mX*R(^d*kOjOUSh`-;L2OJ39W9uF1y_(U_l zGb4Gy*zhtz?q%+6UmFGSGh@D_;>-AI`HgGS;njHJKHp_e#Xt}0z5*Px0h2zfJFJC) zgq$_^8-AZ@uX{FMNiwr4hR_MrY8s2!#j{7k_vm#G?^YofdY9a7#$+(^k#u+&1mWK% z6D6pAYT#*N8f*S~+f%N+xl>6dO9*>XbNT!#{!to~`^r-j|(Oi5_aQY`w@AYPZtl%wbqUY^8~E$5-t5sT(D99$?~y1N7Hu7bpT01_4255F*z}gWG)>lOTM4(WVB_i@Hb@?($sqryYh<9meOuVw0zsl z_!OPCCB{Cx)?vMzeAdoNsC06gwHA5ue(RN^aa`qvS|3=YGWrT%pl*qb@D@Hi3&7$6 z6xt)eG|Sq!uFpcII!JxISUNA8J}FDs5GfL$YTzBaX(>Wc=-0&i<`cC(RiO|x$WaFw zh#oX;4_H5l`Az)G$j>@+9AeS9MIMpxbkvn!d_&P!?rp04d|YwYHW1}%VsDzKiv+pZ zwem|@u?F`S#WNW))b^&(G?i81$b5QnylR4;)qs@v+U*mENNkNk2r(uL#;~JdO0z zmaa1$6p|)dE0Kg2kj86?N0KD8$zc>CNme`}yHTLAKP|e#8t8$zfq(I)c-K@0!gr+- zF5;6#b+N)zGd(J8BrR-fz2TLv_#LCcGr~(#_5dtWGuns1p4{sbDHf099Ke~-EhmAR zV*KINz6ba>eft|ff5NDArVh+Iz+s#-)*2$O412Fbem6L#;X@^?C`hxMt8q5sjv1OE zTWGA8yp~1ehkLU6%wMxZ3Yu&8T1+(vrTuM@taD&497IU88B9o^kc9>k!;B2*r$a=c z00kNdx50U|r4mj^fOe8B!Y~6y=W#NL{{na`UaE9AmV+of!skvMArgPi{n!9b0ZOP{ z7zSu(*>l__i^9P!9dK=qRPAmjXlLa`|Bslr$L*R@^)x7{SjaF4<40!k)!af%W=vEU45q1fS4Yitrg>VAk3@&V?H4hnX6 zBQL*1G-pp&mqxBJ=&n#d*lw3&p=6W6E^R6;bF6K7{KvqLP_~^q zo<*3$h^{0r2g!K2T`1wu6G>tNJb4aWOEA!OiJ=Vm*c=)N$~s(#Z84OJDFv+so~arv z6bFmIM;>TM04@Gd_z33Sdq+U!fSR0vTKNEnD0vWAY8(=b0UO6Clj|`Y=&;=jLJ}r& z1`O*1V7q~XBlCUIBw)Lf)c%}Ikbp2{5N8!QgGwRL;iO>>r>d8R($R-*M-Sr;S15ou z3xhe$B4*5;L^7`W-dv>c^PhtiorFC?>G?tVJw5ECRC_$#e(&#idN@CC&>?KE>7Px< zZhd$-{qFbtbc9I>942ZX#a24Y6RQ7Uf%5p8tJgO&%zhiSFjFa)&8uV^tOV&@d?n($ z25GUvt9Lzcwub%!RUgBIHVUnfXGCc6auC#K#KXCfR-1y?UC-Mgvv5AyC+P&;RCK`} zwe@GiNkBs@2E&=uhM=B2Mnxnf-orlzWDvs-FptDU~fK-mnDRqFP)`Gz-k#dMIS;$E` z-O!7V%9Z?b7}Pb2Dd{duB`Z521&RlMAbC0PQ3J)N_b2fcw@#f!qFZB8Ay?mGms)~h zwyLKIfSVJL!L87{f+2oZum#H+@&$7w;w+CMzSQgw?{*sk<~i(%DT{dfHqwwH)7dMF zjQTXvk--BrR)R#NUOg=6py5 z=0!PPcau~>SLB8{tmAdT@$ZE1&$i>QsQYwVs8b>=yu+#KmOTvKATN-`*{f$UoF^OF zqrbKrXFxt9T2gLrn$x55TujwvmdD#lqT;e0zV?uDbw>zsi*n5dTz45)wi=i4W}yv) z7Au1-EV@V$b7#oGnjlZnAn9>tZ5lvx_qCb_dq%mARrB7ntavZB~yT6T!^op`Q zQv{V~dEF%LnP<36c!?f;baL@_seX&*@*s6$mH+UGygy;Xe>-(}mH+;!pkH|*``9Dm z@nBbOCEISb{;~AC)a_vptNPiShXZ}QJ{2E>`Ro3*jz_n=zTz)R(TzTNV%D)PEjZbh zw5(XzpMtR>v8@>TM5?uXDtC>vTiO(yf$i~XkyH(SlW1+v&QtpKtKI8fNor6R5nT<3 z&mG#^N3?c^7q5D(-}=zUe${&S@_#o&{Cv~%UP@L@iQI$sT>5MW!jUF)l%4tpj&J7l z1fpUu6Q}JOaqy^6n||aPYv+ouGP&p;YiG1oE5dz3J8_$LPH)Lk%Tij~($}@p&e6+| zfVZMh`Ra+3V`lR+t}=W39FWg=P|98Hc~rmb;7;fgogg&S+5UEDT9X@caE*^~y3(A= z-_e26 zMw3!{snUuvQFZ7kOMdw`#XhB{u4!G3Ws{BNlXjNW&r4f2^r2TDDf|4*PW*VzPZ+}} z?eT7B(O0}ow8ubr5C6l0@emi=nX;;YVxv%yGo_ln8V_vwN%`DA@2nA2r%fks6=NCqe=aZ zWlbI_S;iwYiVfFU3xS>RerAvRmR4Hz-o3eNv+*#9>@M^%G1`sm2vpt3h#2^AUkRYoM$^fE zfs_E?Ee$>Vo;wi4P5$Nc9uW{mj6dRB5I8TIn2D4Mok~PR^yaaPkIn0ihv%d0^Xl4a zyPjY6`zOY5dn^O0BgpHw`7&8=kcDQoha)%;KJ(6=)^yhE1mU!H!srGi3peF5T@52d z!%v@0ME08BzP?I%!ikaXhFl`osg~`=v}VWIeQZz7T&SJqyV_glqT)CR`Q#a(0ywQ@ z=7E1S%Uz@UmJ#x%Zqk1C{%W(ie7$|4jRSx;59h$oMD@+cMSg!~o8h%)qeUVc$2DO>+b|w})8(2J3-Q8jjh+p?mfuANg#uYY<<|cTjb#15ic-M*#|zzW z3i!AB$5abY1}Fe(0qOuIh3cA#8`=^2m{1rTYKYH^3!t9^uXK-(L0)3)wh6%6{go^~ zIkW%aKFG!d_$O(oAuIFyYox5Hi=m03i{ZbFTzU9C91M-EO<3&a|-3dF+pPNu{x3``6_dSd_!GchX@12a1lI}-;T zF%#g07ASdTbF-(kq?LzpYz~9OdF?BX}vUG56B!_eVH zbikiQf62`y>||=_V(+A4Z*RjV=45DVDr;!{A~x_vY6c(=F7GPD2TsK{sN;9z5EZ1_W;esn1hIiT~Ge4gdYu2Hg)`S>c1NDyUafx0uJ_0zhU*CB>8heze)0M 
zwf-;1iLC5D`~ELSimmORyZ%MpWo5ZU?2TQ2=!T>SpX&>_7m$A*+!oy$K7ap^*s_Cx@xwi*EcM_x_|Jzh9HIb9OPbGdBIBTxM1#V@@^`W_lJA zW+QqQCKF?NPIgu!dS*j5CZHLo2{St<#~eUH1EW1{3g)*)Ndk$MPfd->qdZ)mAI~nEqC) z0D#JXS{4NWOM}YGG}wKVafDV8z%qF=DnkPdA` z-o)2Ly-l+!tshRB)l8;!eYlEa__A7EuTHMuMAt7>U~2KC!cf;#M$IA|oru92QfCUN zG~e@`P6(&Sj1RKTINz5SD&X!X?AUs5@VYJlwfuuO($4zAOr!byqV^tYMCIkhIU&M# zzBk&4m4d+Rj+eFPr(?;%ze49HN8;c8X%!5eY@K;{UbfG%akaH`CT3++a56QqG=7;& z5(Am|_!$2JL`e~C9u_75Gp8sUGdqi@h#0FN8#9xj5W6q{D9rTookdibkB5y#NK{Nn zL{u2S%*x5cBFg!a;Uyv-ho}&MNk~vkj8B(Q($37Dn2GbZ9ew`j5+)$)OXxj&mzVV4 zcJ^UZw)8T6`SK42MnlQej8R=1NX$$O)cqG3VzFvu1OiGa1~n9oAcKGqg)UGq42$w5 z0SxN`Iw6lZ{ySm#He@7oh`&TAB?`?0Cf$2DwL3z(RRq#IM*oq3a6ixl1IKo0iA{dg z%?Ina;X63?Fhv10$p@-#pY{(DyPw2Ud-_7ipP*#Q0whLvU>wZ=U&aF!kXfJU!!fks zu$rLa=#B)*TfsHv$g9%{t4~A?u6+SC&%G3hBZb5t?|5p#G0r=TOg1lr +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +#define RTE_LOGTYPE_IPv4_MULTICAST RTE_LOGTYPE_USER1 + +#define MAX_PORTS 16 + +#define MCAST_CLONE_PORTS 2 +#define MCAST_CLONE_SEGS 2 + +#define PKT_MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define NB_PKT_MBUF 8192 + +#define HDR_MBUF_SIZE (sizeof(struct rte_mbuf) + 2 * RTE_PKTMBUF_HEADROOM) +#define NB_HDR_MBUF (NB_PKT_MBUF * MAX_PORTS) + +#define CLONE_MBUF_SIZE (sizeof(struct rte_mbuf)) +#define NB_CLONE_MBUF (NB_PKT_MBUF * MCAST_CLONE_PORTS * MCAST_CLONE_SEGS * 2) + +/* allow max jumbo frame 9.5 KB */ +#define JUMBO_FRAME_MAX_SIZE 0x2600 + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ +#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ +#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */ + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */ +#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ +#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */ + +#define MAX_PKT_BURST 32 +#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */ + +#define SOCKET0 0 + +/* Configure how many packets ahead to prefetch, when reading packets */ +#define PREFETCH_OFFSET 3 + +/* + * Construct Ethernet multicast address from IPv4 multicast address. + * Citing RFC 1112, section 6.4: + * "An IP host group address is mapped to an Ethernet multicast address + * by placing the low-order 23-bits of the IP address into the low-order + * 23 bits of the Ethernet multicast address 01-00-5E-00-00-00 (hex)." 
+ */ +#define ETHER_ADDR_FOR_IPV4_MCAST(x) \ + (rte_cpu_to_be_64(0x01005e000000ULL | ((x) & 0x7fffff)) >> 16) + +/* + * Configurable number of RX/TX ring descriptors + */ +#define RTE_TEST_RX_DESC_DEFAULT 128 +#define RTE_TEST_TX_DESC_DEFAULT 512 +static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; +static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; + +/* ethernet addresses of ports */ +static struct ether_addr ports_eth_addr[MAX_PORTS]; + +/* mask of enabled ports */ +static uint32_t enabled_port_mask = 0; + +static uint8_t nb_ports = 0; + +static int rx_queue_per_lcore = 1; + +struct mbuf_table { + uint16_t len; + struct rte_mbuf *m_table[MAX_PKT_BURST]; +}; + +#define MAX_RX_QUEUE_PER_LCORE 16 +#define MAX_TX_QUEUE_PER_PORT 16 +struct lcore_queue_conf { + uint64_t tx_tsc; + uint16_t n_rx_queue; + uint8_t rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + uint16_t tx_queue_id[MAX_PORTS]; + struct mbuf_table tx_mbufs[MAX_PORTS]; +} __rte_cache_aligned; +static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; + +static const struct rte_eth_conf port_conf = { + .rxmode = { + .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE, + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 0, /**< IP checksum offload disabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 1, /**< Jumbo Frame Support enabled */ + .hw_strip_crc = 0, /**< CRC stripped by hardware */ + }, + .txmode = { + }, +}; + +static const struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, + }, +}; + +static const struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +static struct rte_mempool *packet_pool, *header_pool, *clone_pool; + + +/* Multicast */ +static struct rte_fbk_hash_params mcast_hash_params = { + .name = "MCAST_HASH", + .entries = 1024, + .entries_per_bucket = 4, + .socket_id = SOCKET0, + .hash_func = NULL, + .init_val = 0, +}; + +struct rte_fbk_hash_table *mcast_hash = NULL; + +struct mcast_group_params { + uint32_t ip; + uint16_t port_mask; +}; + +static struct mcast_group_params mcast_group_table[] = { + {IPv4(224,0,0,101), 0x1}, + {IPv4(224,0,0,102), 0x2}, + {IPv4(224,0,0,103), 0x3}, + {IPv4(224,0,0,104), 0x4}, + {IPv4(224,0,0,105), 0x5}, + {IPv4(224,0,0,106), 0x6}, + {IPv4(224,0,0,107), 0x7}, + {IPv4(224,0,0,108), 0x8}, + {IPv4(224,0,0,109), 0x9}, + {IPv4(224,0,0,110), 0xA}, + {IPv4(224,0,0,111), 0xB}, + {IPv4(224,0,0,112), 0xC}, + {IPv4(224,0,0,113), 0xD}, + {IPv4(224,0,0,114), 0xE}, + {IPv4(224,0,0,115), 0xF}, +}; + +#define N_MCAST_GROUPS \ + (sizeof (mcast_group_table) / sizeof (mcast_group_table[0])) + + +/* Send burst of packets on an output interface */ +static void +send_burst(struct lcore_queue_conf *qconf, uint8_t port) +{ + struct rte_mbuf **m_table; + uint16_t n, queueid; + int ret; + + queueid = qconf->tx_queue_id[port]; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + n = qconf->tx_mbufs[port].len; + + ret = rte_eth_tx_burst(port, queueid, m_table, n); + while (unlikely (ret < n)) { + rte_pktmbuf_free(m_table[ret]); + ret++; + } + + qconf->tx_mbufs[port].len = 0; +} + +/* Get number of bits set. 
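+ * The loop below clears the lowest set bit of v on each iteration
+ * (v &= v - 1), so it runs exactly once per set bit.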
+ */
+static inline uint32_t
+bitcnt(uint32_t v)
+{
+	uint32_t n;
+
+	for (n = 0; v != 0; v &= v - 1, n++)
+		;
+
+	return (n);
+}
+
+/**
+ * Create the output multicast packet based on the given input packet.
+ * There are two approaches for creating the outgoing packet; although both
+ * are based on the data zero-copy idea, they differ in a few details:
+ * The first one creates a clone of the input packet, i.e. it walks through
+ * all segments of the input packet and, for each of them, creates a new
+ * mbuf and attaches that new mbuf to the segment (refer to
+ * rte_pktmbuf_clone() for more details). Then a new mbuf is allocated for
+ * the packet header and is prepended to the 'clone' mbuf.
+ * The second approach doesn't make a clone; it just increments the refcnt
+ * of all input packet segments, then allocates a new mbuf for the packet
+ * header and prepends it to the input packet.
+ * Basically, the first approach reuses only the input packet's data, but
+ * creates its own copy of the packet's metadata. The second approach reuses
+ * both the input packet's data and metadata.
+ * The advantage of the first approach is that each outgoing packet has its
+ * own copy of the metadata, so we can safely modify the data pointer of the
+ * input packet. That allows us to skip creation of the output packet for
+ * the last destination port and instead modify the input packet's header in
+ * place, i.e. for N destination ports we need to invoke mcast_out_pkt()
+ * only (N-1) times.
+ * The advantage of the second approach is less work per outgoing packet,
+ * i.e. we skip the "clone" operation completely. It comes at a price,
+ * though: the input packet's metadata has to stay intact, so for N
+ * destination ports we need to invoke mcast_out_pkt() N times.
+ * So for a small number of outgoing ports (and segments in the input
+ * packet) the first approach will be faster.
+ * As the number of outgoing ports (and/or input segments) grows,
+ * the second way becomes preferable.
+ *
+ * @param pkt
+ * Input packet mbuf.
+ * @param use_clone
+ * Control which of the two approaches described above should be used:
+ * - 0 - use the second approach:
+ * Don't "clone" the input packet.
+ * Prepend the new header directly to the input packet.
+ * - 1 - use the first approach:
+ * Make a "clone" of the input packet first.
+ * Prepend the new header to the clone of the input packet.
+ * @return
+ * - The pointer to the new outgoing packet.
+ * - NULL if the operation failed.
+ */
+static inline struct rte_mbuf *
+mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
+{
+	struct rte_mbuf *hdr;
+
+	/* Create new mbuf for the header. */
+	if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
+		return (NULL);
+
+	/* If requested, then make a new clone packet. */
+	if (use_clone != 0 &&
+	    unlikely ((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
+		rte_pktmbuf_free(hdr);
+		return (NULL);
+	}
+
+	/* prepend new header */
+	hdr->pkt.next = pkt;
+
+	/* update header's fields */
+	hdr->pkt.pkt_len = (uint16_t)(hdr->pkt.data_len + pkt->pkt.pkt_len);
+	hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
+
+	/* copy metadata from source packet */
+	hdr->pkt.in_port = pkt->pkt.in_port;
+	hdr->pkt.vlan_tci = pkt->pkt.vlan_tci;
+	hdr->pkt.l2_len = pkt->pkt.l2_len;
+	hdr->pkt.l3_len = pkt->pkt.l3_len;
+	hdr->pkt.hash = pkt->pkt.hash;
+
+	hdr->ol_flags = pkt->ol_flags;
+
+	__rte_mbuf_sanity_check(hdr, RTE_MBUF_PKT, 1);
+	return (hdr);
+}
+
+/*
+ * Write new Ethernet header to the outgoing packet,
+ * and put it into the outgoing queue for the given port.
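+ * The queue is drained by send_burst() as soon as it holds MAX_PKT_BURST
+ * packets; partially filled queues are flushed periodically by
+ * send_timeout_burst().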
+ */ +static inline void +mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr, + struct lcore_queue_conf *qconf, uint8_t port) +{ + struct ether_hdr *ethdr; + uint16_t len; + + /* Construct Ethernet header. */ + ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr)); + RTE_MBUF_ASSERT(ethdr != NULL); + + ether_addr_copy(dest_addr, ðdr->d_addr); + ether_addr_copy(&ports_eth_addr[port], ðdr->s_addr); + ethdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4); + + /* Put new packet into the output queue */ + len = qconf->tx_mbufs[port].len; + qconf->tx_mbufs[port].m_table[len] = pkt; + qconf->tx_mbufs[port].len = ++len; + + /* Transmit packets */ + if (unlikely(MAX_PKT_BURST == len)) + send_burst(qconf, port); +} + +/* Multicast forward of the input packet */ +static inline void +mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf) +{ + struct rte_mbuf *mc; + struct ipv4_hdr *iphdr; + uint32_t dest_addr, port_mask, port_num, use_clone; + int32_t hash; + uint8_t port; + union { + uint64_t as_int; + struct ether_addr as_addr; + } dst_eth_addr; + + /* Remove the Ethernet header from the input packet */ + iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr)); + RTE_MBUF_ASSERT(iphdr != NULL); + + dest_addr = rte_be_to_cpu_32(iphdr->dst_addr); + + /* + * Check that it is a valid multicast address and + * we have some active ports assigned to it. + */ + if(!IS_IPV4_MCAST(dest_addr) || + (hash = rte_fbk_hash_lookup(mcast_hash, dest_addr)) <= 0 || + (port_mask = hash & enabled_port_mask) == 0) { + rte_pktmbuf_free(m); + return; + } + + /* Calculate number of destination ports. */ + port_num = bitcnt(port_mask); + + /* Should we use rte_pktmbuf_clone() or not. */ + use_clone = (port_num <= MCAST_CLONE_PORTS && + m->pkt.nb_segs <= MCAST_CLONE_SEGS); + + /* Mark all packet's segments as referenced port_num times */ + if (use_clone == 0) + rte_pktmbuf_refcnt_update(m, (uint16_t)port_num); + + /* construct destination ethernet address */ + dst_eth_addr.as_int = ETHER_ADDR_FOR_IPV4_MCAST(dest_addr); + + for (port = 0; use_clone != port_mask; port_mask >>= 1, port++) { + + /* Prepare output packet and send it out. */ + if ((port_mask & 1) != 0) { + if (likely ((mc = mcast_out_pkt(m, use_clone)) != NULL)) + mcast_send_pkt(mc, &dst_eth_addr.as_addr, + qconf, port); + else if (use_clone == 0) + rte_pktmbuf_free(m); + } + } + + /* + * If we making clone packets, then, for the last destination port, + * we can overwrite input packet's metadata. + */ + if (use_clone != 0) + mcast_send_pkt(m, &dst_eth_addr.as_addr, qconf, port); + else + rte_pktmbuf_free(m); +} + +/* Send burst of outgoing packet, if timeout expires. 
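+ * The timeout (BURST_TX_DRAIN) is expressed in TSC cycles, so its
+ * wall-clock length depends on the CPU frequency; 200000 cycles is
+ * roughly 100us on a 2 GHz core.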
*/ +static inline void +send_timeout_burst(struct lcore_queue_conf *qconf) +{ + uint64_t cur_tsc; + uint8_t portid; + + cur_tsc = rte_rdtsc(); + if (likely (cur_tsc < qconf->tx_tsc + BURST_TX_DRAIN)) + return; + + for (portid = 0; portid < MAX_PORTS; portid++) { + if (qconf->tx_mbufs[portid].len != 0) + send_burst(qconf, portid); + } + qconf->tx_tsc = cur_tsc; +} + +/* main processing loop */ +static __attribute__((noreturn)) int +main_loop(__rte_unused void *dummy) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + uint32_t lcore_id; + int i, j, nb_rx; + uint8_t portid; + struct lcore_queue_conf *qconf; + + lcore_id = rte_lcore_id(); + qconf = &lcore_queue_conf[lcore_id]; + + + if (qconf->n_rx_queue == 0) { + RTE_LOG(INFO, IPv4_MULTICAST, "lcore %u has nothing to do\n", + lcore_id); + while(1); + } + + RTE_LOG(INFO, IPv4_MULTICAST, "entering main loop on lcore %u\n", + lcore_id); + + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + RTE_LOG(INFO, IPv4_MULTICAST, " -- lcoreid=%u portid=%d\n", + lcore_id, (int) portid); + } + + while (1) { + + /* + * Read packet from RX queues + */ + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst, + MAX_PKT_BURST); + + /* Prefetch first packets */ + for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) { + rte_prefetch0(rte_pktmbuf_mtod( + pkts_burst[j], void *)); + } + + /* Prefetch and forward already prefetched packets */ + for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) { + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[ + j + PREFETCH_OFFSET], void *)); + mcast_forward(pkts_burst[j], qconf); + } + + /* Forward remaining prefetched packets */ + for (; j < nb_rx; j++) { + mcast_forward(pkts_burst[j], qconf); + } + } + + /* Send out packets from TX queues */ + send_timeout_burst(qconf); + } +} + +/* display usage */ +static void +print_usage(const char *prgname) +{ + printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " -q NQ: number of queue (=ports) per lcore (default is 1)\n", + prgname); +} + +static uint32_t +parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return 0; + + return ((uint32_t)pm); +} + +static int +parse_nqueue(const char *q_arg) +{ + char *end = NULL; + unsigned long n; + + /* parse numerical string */ + errno = 0; + n = strtoul(q_arg, &end, 0); + if (errno != 0 || end == NULL || *end != '\0' || + n == 0 || n >= MAX_RX_QUEUE_PER_LCORE) + return (-1); + + return (n); +} + +/* Parse the argument given in the command line of the application */ +static int +parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + char *prgname = argv[0]; + static struct option lgopts[] = { + {NULL, 0, 0, 0} + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:q:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* portmask */ + case 'p': + enabled_port_mask = parse_portmask(optarg); + if (enabled_port_mask == 0) { + printf("invalid portmask\n"); + print_usage(prgname); + return -1; + } + break; + + /* nqueue */ + case 'q': + rx_queue_per_lcore = parse_nqueue(optarg); + if (rx_queue_per_lcore < 0) { + printf("invalid queue number\n"); + print_usage(prgname); + return -1; + } + break; + + default: + print_usage(prgname); + return -1; + } + } + + if (optind >= 0) + 
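+	/* Put the program name back just in front of the remaining,
+	 * unparsed arguments, so the argument vector handed back to the
+	 * caller still begins with a valid program name. */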
argv[optind-1] = prgname; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + return ret; +} + +static void +print_ethaddr(const char *name, struct ether_addr *eth_addr) +{ + printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name, + eth_addr->addr_bytes[0], + eth_addr->addr_bytes[1], + eth_addr->addr_bytes[2], + eth_addr->addr_bytes[3], + eth_addr->addr_bytes[4], + eth_addr->addr_bytes[5]); +} + +static int +init_mcast_hash(void) +{ + uint32_t i; + + mcast_hash = rte_fbk_hash_create(&mcast_hash_params); + if (mcast_hash == NULL){ + return -1; + } + + for (i = 0; i < N_MCAST_GROUPS; i ++){ + if (rte_fbk_hash_add_key(mcast_hash, + mcast_group_table[i].ip, + mcast_group_table[i].port_mask) < 0) { + return -1; + } + } + + return 0; +} + +int +MAIN(int argc, char **argv) +{ + struct lcore_queue_conf *qconf; + struct rte_eth_link link; + int ret; + uint16_t queueid; + unsigned lcore_id = 0, rx_lcore_id = 0;; + uint32_t n_tx_queue, nb_lcores; + uint8_t portid; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); + argc -= ret; + argv += ret; + + /* parse application arguments (after the EAL ones) */ + ret = parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid IPV4_MULTICAST parameters\n"); + + /* create the mbuf pools */ + packet_pool = rte_mempool_create("packet_pool", NB_PKT_MBUF, + PKT_MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, + SOCKET0, 0); + + if (packet_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot init packet mbuf pool\n"); + + header_pool = rte_mempool_create("header_pool", NB_HDR_MBUF, + HDR_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL, + SOCKET0, 0); + + if (header_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot init header mbuf pool\n"); + + clone_pool = rte_mempool_create("clone_pool", NB_CLONE_MBUF, + CLONE_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL, + SOCKET0, 0); + + if (clone_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot init clone mbuf pool\n"); + + /* init driver */ +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n"); +#endif + + if (rte_eal_pci_probe() < 0) + rte_exit(EXIT_FAILURE, "Cannot probe PCI\n"); + + nb_ports = rte_eth_dev_count(); + if (nb_ports == 0) + rte_exit(EXIT_FAILURE, "No physical ports!\n"); + if (nb_ports > MAX_PORTS) + nb_ports = MAX_PORTS; + + nb_lcores = rte_lcore_count(); + + /* initialize all ports */ + for (portid = 0; portid < nb_ports; portid++) { + /* skip ports that are not enabled */ + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("Skipping disabled port %d\n", portid); + continue; + } + + qconf = &lcore_queue_conf[rx_lcore_id]; + + /* get the lcore_id for this port */ + while (rte_lcore_is_enabled(rx_lcore_id) == 0 || + qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) { + + rx_lcore_id ++; + qconf = &lcore_queue_conf[rx_lcore_id]; + + if (rx_lcore_id >= RTE_MAX_LCORE) + rte_exit(EXIT_FAILURE, "Not enough cores\n"); + } + qconf->rx_queue_list[qconf->n_rx_queue] = portid; + qconf->n_rx_queue++; + + /* init port */ + printf("Initializing port %d on lcore %u... 
", portid, + rx_lcore_id); + fflush(stdout); + + n_tx_queue = nb_lcores; + if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) + n_tx_queue = MAX_TX_QUEUE_PER_PORT; + ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue, + &port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", + ret, portid); + + rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); + print_ethaddr(" Address:", &ports_eth_addr[portid]); + printf(", "); + + /* init one RX queue */ + queueid = 0; + printf("rxq=%hu ", queueid); + fflush(stdout); + ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, + SOCKET0, &rx_conf, + packet_pool); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%d\n", + ret, portid); + + /* init one TX queue per couple (lcore,port) */ + queueid = 0; + + RTE_LCORE_FOREACH(lcore_id) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + printf("txq=%u,%hu ", lcore_id, queueid); + fflush(stdout); + ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, + SOCKET0, &tx_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " + "port=%d\n", ret, portid); + + qconf = &lcore_queue_conf[lcore_id]; + qconf->tx_queue_id[portid] = queueid; + queueid++; + } + + /* Start device */ + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n", + ret, portid); + + printf("done: "); + + /* get link status */ + rte_eth_link_get(portid, &link); + if (link.link_status) { + printf(" Link Up - speed %u Mbps - %s\n", + (uint32_t) link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex\n")); + rte_eth_promiscuous_enable(portid); + rte_eth_allmulticast_enable(portid); + } else { + printf(" Link Down\n"); + } + } + + + /* initialize the multicast hash */ + int retval = init_mcast_hash(); + if (retval != 0) + rte_exit(EXIT_FAILURE, "Cannot build the multicast hash\n"); + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/examples/ipv4_multicast/main.h b/examples/ipv4_multicast/main.h new file mode 100644 index 0000000000..740cf4cffa --- /dev/null +++ b/examples/ipv4_multicast/main.h @@ -0,0 +1,48 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/l2fwd-vf/496039_L2Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf b/examples/l2fwd-vf/496039_L2Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..750d025970bc4c75050d15f306446edaca772b22
GIT binary patch
literal 70953
zLQo0RuK2YZ;dG1@JqbO1>%f4g%`=KdZk#>S`2~V)mg33zF!@KCRB7_3>K;U zzyvR-Ogr5%hIB36P)?Wo=mU8@i`sOb;|%UQdr{kE8d$SBrC$DJcn(#3pR)^}3sE?$ zB}^gK&4so$yZHKQl%Br_Xip!LNh>hD%FGmQHxI8MDX_*FF>Q|&B)k^$S1JEnz~EW1 z^TMET69E0elh7Mcjuw;^ijk13fh=$^M-yh$WZQO)1-FLx!DF-smh^lx)CyO2sL%@B zA;3^tX?_@E7E+7EDj$2_8p7NmQ2t7*4iX`#c&ESlEgGx|RiJ6T2{#y#>U#kPpOaV7 zxmmuy=g}@@eCNo9Q*L`SAH(?qZfr-2W@dcP@BJ^u*<1M4g^in;hxeYb)#ovzn%hJ3 zW@H;(1wQSlS0S$e8(VPiozfMhEsde$Q;oETB-Tvn(nSwa(e7CPpck5!S;x^{Q#Th* z>tPTJ?uWtT%H ztN2hPNMxQwlFRR2{q}t4!>@-{zO9#MUNS(hiYu8PIbbY@(jeEtuQ_7ox$R{bLv)Wx z@jDE6LQt$rCAQVKtz3iPL$=F@-Fl{yq-mfW$NFuxcPx9_8XyAtmy;!M5}!hm)7gBhFwvH^AEMhoMgYU+OQ3-4?BvTbjZ zZ$Coe?_-EGldnh}LGF&N??_vn74rk>u1N$B$A8vyOH}>LyB6(sC5ot~L1|bAg10>K zC)eh&s`>5a#|FK4QCfw!d$=IZ#%qE1|9t5+{;+)3#TOH^O-b#JF|z#0t7tQk^w9Fx zo^Cou16`*~Q|lr2cyOCoSw?9e$F*uY942hpjJv&x;`wGyHkO*e5 z+#`~G+L23tMoq{TcG;MxvUgL zxI;R*jBizoov{RaC)o8!9N6Fi_UgPiHj$)`7{02fg~n-qH}J`DP7tpfoAKD>b` zC`1Y!{iVpb=jw(L+W8|0d$;!VZ+{D;=l^Z3ZuS}CSg0B?)Osj1R3K785z*0+<@M|%Xn z^4veJN}5^6M5Z-}b()`BfR5BGc8OW`h>T}gs)UVP&d_nr6XJ&E((2jJKeygd31pp~ zfu6M)z8L=&HDg0SmVxQFm2m9-rTe12+V9*_HbBIv&k$%McN6-&g@L?zQy$a4sCgw0 z()qAHIx*0ovV_^o<=9Tn#pj#luZhd`sjlIKzn07`v=lfM!y~_CQ zvck{m(WOtwvl; z*`eoghJpNgIK>>jR+29e$U{J)|6n?Nrr%{NF`Q+&Ebg$KPN-dQn3sV6NC_KnRJb{& zemhB{bNl)Ho9t34RCO!xwLP2K@Sv98JCAkdq@o*KSrz*b(`Z0k{i)GS3Ue>A@mqUT zl~MECdM{qP>*9f%WqTu1ETB+|Ng}U#D%xztuOHnf@d00Jg6FakO!wQh_zv0{_H3Z~ zvWdE-CfNx~x1+c12AHleq5#;kq-R?cVJ$&pE|c*mCecxf+I8UlTfAPC1qYJwj`tVv z3rkd4VS-SUL=&$|!cd^?$XNS^VM{W)oOV9>nwLWNhNW^!I2=#KAJOCQ!J;B+=UkJd zO7c)@2yGcQ3O`!r2YHeAm)iE1MWiMpUX=%JwnyaLU8?)u8G;q z@0(wX<(PK!UlV;zOL4O&ayLFlc-7Hp^-i;VxGX&fEqvQW-om)R;Ui~6w9|d|gu8FPTC^YPK7iS;JKq0AhGF=>0VV&f!?3fl z|9|ndEv|J(;?acLw@4^H78Jv(n?Xe1dY5U`Kr?3}EA!~{7(!Wq)RJLhrjGfuD_e1OrU%R(q#%<0GQdxW?;D*`7$vQ zA`QCPm^@hcAy7rf4fB)*u+jnZ8*#E8fP4&dXAcCih`zPvor_rH>~;X%*aid$!E>|G zm`Tk|ScFl~u!2Ucc(caW00+cDXip$CQV8L8!w& z5;dDN*8dn;)H3nkpx%uoa44|v*#Wu0mrJ!8VAHf+F09ycY3XIQv>ClM+i-vEg&53>uvDPDtK!Cwp zBezQD+mPZO%m=_GJq2UCOM(HT01QL0&K$GKD&$ul$EY4pb8Qu=wy^OwyWVYkUm+UR zS&rs|GU-pRg(x)($l%<{&voPN<;j}FySY%Y@%%cIYNZZx1uFWCkBDyo{Vg3HHEhgd zNo!j5LEUBMM#jLBmrh==FtauRP+FkSg=IkDHWMg1j1-2QM)!eo$SexcFv1=D(Q}34 zQY~SSd?=bN;so?AtKGL8K{7YnKL~_UEmAno0W@_>k21LsmG}bH$C=SjV18nRUhPRBT&bY|;JNOy-6{JafMe~z@7FJUNqc%U zx2;q4=?Z3hZ&}sar0zloH!r8BzmNiF1U_xO=w^m)Tiycou6OEO>)P0sdb9&mg3sr^ zF5m6YHwr=f%AY=&1aYVi(h=*1*ytBwHc2T?>%08ab!^hkmcsmM5Dcd5Z0&|lgAYt2 zP8{9awzqd^2Ft^&1WWoj!}h4D5E$|U!PWo=K%S!1sxFT;hK_0akKxp^NIOpjJ|C_( ztFAp=djFbq&f>gTC>!#Z(5%6QjvER*2pCgj>Gf>e)DeiQWVZidV3tn-(OIWq%KGtc zTaTc_Z>>u+8{38@rhdf8pl;I1x%PO;^Vhzk9UL}o>(Hr)R86FHRY=zs0I8WXm>iEp5HrLnwmDeb%!<0UI*26cF%^vmdEtF%=YzP!~2IE_Hu1D>cjP}D*afI;ee@CKazpHLU zz?wm2`N2Xf3Z3v)4c@6^)U0mF#dbc6MyJf8!x`2BHj_hpLxxQ{@N!`D= zLQi-(NFl97tY*1|3k7}q=U~>_?%FCSdZrJO+p^w*)}#_4ghY_h=e^|5T#aw6FTQMF z#5PaE)`2>&LlB~@ISf&MF5|`E6`PDb%|JA~Nr;CuYzSe*2n95>GN^`02VRo~Pkvsx z9p@V1A7IVOyc_Sp{=p`MkXhcEvED|Co?+8P@f0xchWA}(*X~gt$~f6&CTxa&G$TYM__(AjX~af zp3RQU>R7_R&BpmZ_^fWPDR6r#zPXfJ-_R@eZ8KM08rQA$$YF?EidaJ)Xt1@xWT+0t zkaY?6eAQ#@eLaF{SDWt6T$%Og|rVsIW=XC760ZLgDXf;vu@Q^?dn+ZnaCyn^zQC(d;cH`%4j5b z`Fhh3u&zI#Evx&1Xb-QB9b71@g3wpx8@6*nz?B0#`N(}77@mhV4&$Yoo86|G@3&dw z+nvputcze?=Ay0i^%pnBi6atZceelS?DUDN?3q~rX)W9Tz-m32U0?GqSX%*M_OkXy zM#HD&6!~TC%%XuabjT8|w$<(4*czzW_Vxz%u2$XtyxafPM8}^Dh!Uy@37AlYoQX2LOF_-~%8exMK7NcxG`v?(>7XMWWvj z774L1;vut7CQnzEj!f3Uk~{7T)?iElv%pwrodIB+k27#ZD>mVSX!0a8HEbkOz^Bh(S4qFh zm`tGRNv9>2q#+d6Yu{pcM+~l3i=}#sqd=o$@%2xw0F#L6&n!Wzx;>RJt z%PSa0zy>?Ut4u)uo zpC;1g$~ZK-_h{*8Td(S0b>#}!9Tydk&o{!BKyX94TBqj)v3n~11Um>K3V7FJ)1 
zh|Is{#Yhb3K`;C^Dv<2vm57)hB8vFiZtBLeKeQ+&5C&l`Lde*+cERDbabS?XYbL2- zB7niglz7*dNWZ`$;ngL^oNEQ{4kHsSH_g^3^D`I!%tZxVM-&4?bh?x z(+L+HuVB!>KuUUUs41I5%i?jCLuYArrwO$LYSLgp66jr<@t+ zRLE?+nPcn+4%C3~`TXFnh$EsDN076P3kMGnIM}oVGDu*|GP|VQs<<~YP@Pgq+<$Rk z@d;T91D}9@AFY!2ES%TzaKkPruy_EQr*V7vNUX(Ub(WOwMIDKjOiP}*Xf0#bI)5k% z8_Ic@r_aoeRD9qbE!J;6l=LS>KphDcXDO9e=gwK+=q`_stkv3_R-bLIiEs{LcJ(y8(W$DAo6+R27A{J&>R%;* zElJ)ZB6c_>7{D4XK%PC)2-LazhwD)CdpS^0M;pE+VZF#g=5`Z-SEEEp!8Y;V;h_>7 z;#((VPWZ-6M@5l}H9Z7z(wAv~FK7iLMDVS`kfbWyhKvASa+YMvS&%8S=3b%< z>o7Q3$n{lmBXdt)VIEFuHOJ!_fKJV6Jg>B-a-As6YLwVbroG@WflfK$8LncM zOQu)OEQ6xF&K3oWh;leX{n}Y(Uay9)z}n^~h>~Js2#;NfXvI$Ld>>bXonl4l8gAXM z!BWaEpfaV|O7_s67p|V`{1ja2XcR0ii4Q9(c?(pYzwTb+pZhui;zr3OTKNDwuW(WX zhf&*e$_eGHr6Dxf{NUU^z6${8%EmyHWINGH*l@K=u5~g7C8Y+^)cIv+bSj(oSyZGG zfTTmMhqt*FUWb<0`W7mTtjf|viLb4wl+4V1qqP@*#Dq|)K0U)CjVKv|l2E&A!hsQK z+|w^xTKVfP)^9|7l4Y$e$w_j{wp{nuPx7mjM|yLxvB(Tob;2^Z8D|k;Q!{r47wf4E9DjIq#Jcumy)$KT2;tYFceW1h*bsSwoH z2(z=b=2}Y|-9{;Kt8_09g8E_^)PCD&8&<8;KVj+ag+8^3p-Qv-DH);`)P9~dX52>O zoDyn9ETfu-$Ht(?OQ0OdqTJ|- zL?vTXB2j;2jZWjTWcW2M8(6XiDrHHgD)<0zSW?<&l3xvA9K?mpVX+A=#rm?J5R-H_ z=60_j*KDNsaPW3Uil|V$$gw&q;DUP#lj-I?$$82ke+}w-L>QCdf-Rv zaMI!UGtmOBGf1>u2&0h`Y7t>pAy$8QK$5k&a{Jz)op5%c(-R`Q(>h1#6~%T;cERv+J_%Ly#^LHzA?#ieb%*rZ zET?MjplWSctstsSa5-!U(e8kKywx-YXkJxWEu*uZ+T}P33uojHdo&CCQE})2wGI)^ zOj#tF(MAeOS{5&#t%7hye0vR*7pCX=*i0Ms_%g~4m}7MpG5mo}5&K9ytZM1Toe6t> zW+QRohRZlWi4tg}RaZf?4_o+o14niVdg!_;r^#tSig{FNa;5(S?jqS37$x%csI(JB zi!y4bI!+v4W`8&>)~FP8jG70^B7Zzofp4_VCOV6Pdt8C^oe$&5`X{~iZbmt`mTJ8R zXau}_?LC&`4!|6gibgY4m)5ee#KCfhA=Ja(&~+<3rsL&WBW*!V)SVm$gh^-9Ey&#>bQ&j%FGnq&izR9)2Xi^{ znj+}`J*&kZvBIQe0QEd>iFut?=OJhp$jP15K_^-c0l%rkGx#U=56U=X!>sg|>?y?2LL;dX-7Dj#rflhOSm{j8 z)c87)!f;@nMgzpAK|;jdt*o`qrNi11kEAO=#ZxT7Q^sz-;j}X`p9tEB3*;W*ygezw zE7LOITZbisw~^!>ueHSe#@9RY4*0L+f9BCqIy_|nlYa(BS4!p94I+`}6q*rphPcj- z)Zd`5xr;hs{2FJ+-~wNdtAQ(cf&DvHu4tAmVhfsrVa|?y1@iXLe=lzKSm$#JwE?ny z1-Ub39g@p^=%9yy+c+oHX~|3LPT}RgVEhpY?)Sx`^?ip6&<&Gkh3gJEqbgnki4p#Z zzQif*=6m$fDK<9>m-?x@U6jfeXp0JK)+I!|n8KM{%MyRO#0irJOX=vb!+vGn<9hyB z??P)dg$oF}o$@F9S`vN4uMaW)R@Pq-*`Y=UWZR`(e?ISVYKbE7W&&x(6%_iA@5p-8 z^^~NA!hj4kDj%Ox?=9#GG( z%y2Hd@2W=shvYe{jH-P~tKs+<5K}>EI!+@QSdMZF(^H2$i&#sVH(#7*XPe6v7*+Cy z(x)scQwq6CAe0MS9+OKC20`ZK{G)2Seg)RVLP4iJZkjiIkY{b(v87fu9bm(7C&K53 zQfEvutT1WIoXjpM!*&H*;M`?jsUY0?v$Exy!0T<{bt&ntP_gb92h23o@D#!jy(?06 z9SS^5h&`Bm#f%iazFuRL(%%Q6S+xR8-F zFJt#yn+A2a$+nNi!0IauYIn{98gT|=2E&hcrppQ1OKR2@MMHzY{1>|Gkr!D8)t)#nzrW~eKikY8cgz3+l2Zuq?dli zD$02&zmAxhoS&t7^(K@-$9oCkYrH8XEZA~Wo`*nKm}~r86*r)=YfX#qkD{H)2(KIG z&R#DvxGBtvSn zA;)dqg*yZDtR>CNJ;{|DpKtoC;+J@@cf@e0?dP3Seg^oxv7wOv=%J8Q5%_Ig7~6gM z(FEO^Rew%S#>%BX^GJOs9vGTU$fVGqp;wu|Mn%za#d&V|kB&mch{e2|i^+c+5tpZ7 z*#Cqt`yQE}mzVHrYB|3F;_Cos)zCiJP%Rcdxwce#A)Xyp+`uJW+|ajV2{gjo{FzB@ zF4-xYSQhnj>}cri+(E@oAloc!DeWp;s#SDhs{ryLHu*A2TafZ;NWP9;M%US@qLZza zdtRMGteVY@wo_zTxHztWX+*`f$=STg$KOg-Y-u%@8XT@2WA~coKD#+lPW$y}8eN6n zFITt%Q@W^m9GG_<6xmMJv`uww81)#}a+#K}YlSbhAiSU9K3d>9Iv#GxPQ-sbJ1HJ< zQle9IO-fvUc=-Lx9%(T&xw@6n_#+S2<@eLQ^2ciZmb7JZ9F&@;BSyES@GsHAM78$P z721*Ir0mT?+lA>7l&SZ@jYo~`O5`5#^0b7*FWSkJqrwL1K^+Q6d!`yB?1qyBu*S%n zFyiE&w#1W-?0V@KR3#dt3$1ZzZ6L`csc$24rMAYSPgt}RZhHA4)>nmmhE#T| zmJ_Y}N9{m`R)~{9sG<#QEhXNHv2UP=l_t(ZlH1-rxRc?L|J<|!;A;mCnQV8WMEqXd%5EP zpMP-DoO?$2Zqb9ZbANceP;Zmjf2dAF*cpRci{wxS{WReoaV=~Yu_}sP2PsJW`sNkFH+w=7{5(nqjp&ZT-{@p5U73tX!fV~~76n6|RFGs#V zM667H@Av+->8q`mTbI!9OW|8fKf}|jo-c8)ki^VA#+pGL)!4FOEnDt}A+ue?hT=_SzEy&fLcbv;e0ADV*`nrGA7`)NyDtF}wWU>QER ztMB6J?&k0c{naqylCf_lo&zK;j3N72?@|A0hovL(5eoA1oP~eWdfBDA=CF*Xn05M?scnbGa#~b8DCPHfM{0KXT 
zJV^HRA->0A;{-j5A!O)f@2yYAG1wDB86#2f}4#^YiCfU7uGx}YgJ3J*%?Qp4aZg1&Jv15x0LME^>G zh&zR>VX8t-hXWP24|#7O2V=fzipCqi3`Nh?y`^-}rR+?<%!j{BMz&sWxd7qRDVPcK z7+%z4j{@m;fh>=)#?D}JAY}%%krP5)b{B{5X&^bih#@1Ui-7D4_G#pVck)O9%wiDP z)7#UI<%i#WC&8O5LzKml6t2f`3|p{7h~XB}XbMpHjEtuYBY^zDQpHUwpRC4sRfDw1 z8jjh?pg1J}a)o5I9(6#LGPzY@CHSjQbZG^rlxGWNN<&p9kuShu((li!&tL3Mx)+lJ zXO8Wif0S)jjqa@w4ui`RV&dotVN0`cO+;LARCu>S$j_7j?PeBiHoZ1kZTnXYsMPw# z*U>7|7M%o(u@*Qew4w9D79O<|wFaAhTs#D+YHozf%F!qIY5LC03I_|v(XGQwlZR+m<-DhS{XUWvhb~SOTsb{hb}&bU!SEw zBtJ7bY!yQ2px!KrwH;i#9;@BR{yN4K(g z!Z}|EtCHL?Zg%&5amV`xk=Tu}wVz6SfQH)eMn=zqY2!IilJ0mb@Fs733kfDI>{JO9 z1f)Bp7ff0dXtDqV2G&^)ckYzBL%NKPO_JU!3_#*gmYyE*3~XBLpaQ3XjMo@uEJqQ% z!joQ0sGeWCPn?J_tUT0yd>!T(V=a<{;q%}kHEH=Z9cEpCLNb&fYt5;LhW^qNM3i~n zLW|dCybLk1x4jtqA4R-Bj6&oQ5b;>;O4r2u5WN%F1A3e)(v~30ojp3 z`ei{qJ;y-FTmZ^_{uvF_!})q78DXRB6RB;i zw33V;93zbFTQajv+krs57-5YQxoLjJKFvKZ4pZXwp;kX@xYu_JpR+3adU`vM8=utEFWFf5=GbZ*eBKf&2WeRR7L_mEiFKM0X&%Y(JJTYv2Oa-g$ zmAbsAa>&-=gw_-dB0*r-K!H=qiv)-at?kN$s2PLT=}F@OOj%EnoZOTejYs=Fca}p_ z5iWsau44U991k&xn~1F5KE4-oD(cE$?ZE5+ZE!OLx3V~1djzfRjW|HcwLCM_64eoq za+KDWmxK=s%#%)|FT*Z3KFC$)qHBC~qSr27S}j zna7ha(eTefYA5Sw&l%h2msbOpN};W?hQ&U~Un`W}{-`K1F7i^Y05`=|&E!tkQnZq$ z9qNXxT5(Vjs%~^e$%2i>lD4N-D)!AwO#;6Izk;`yu+x^Rcv6i|}e~+WH#ROj(ULL4m*^ zu#$m-bBaV44Qyy1W8hg0z>az#NkJHOEY~S#XRd{{a@zCVzQ2cv*WtCO!|5$ zJr!Z_1Y=mKUrcrX>L#sUjjN=QlkR=lKA=7R9_La4w)hZ!Rs7O>^p zMMo@5FscUfux#kyWHgSbo8T=6Qv{C(GsqTwd||={EXTY;P3R$K<9I3F1XhqRMR*=) zlMA4ra|qj%tGbCKYRf+n0|#OU%E%KN6pT`Y4Mc!xfpxMZ{}lv!%MEnYUQY-{NbVJ5 zSj}vg$Xiho4}l9XmkdJk_eL>SCw_OCOD+$p+7l#+6CuHJ7gl@_h8kg2SG1#Ii3G67 z7f(`@gp&*Uqe9pq0ST&m28>llOWijF6zp`Lzu$wX4j+=cVgWo#q%1@EOsHs{06WgF zCSXL}*DYZc(M))W98p{e8>O1ek3U~x3rE8F$AYY&S}=VM_b)89`ZjUwfv;i3+ERR)eTO4KfT|M?sJg;oBWzM=BZ0DK`M%f%QBC{TsTEDIijq-U?S`%u+4|qn%O{4 z{e=o4kv*Qcv}e5R95SY8k{t&X3nxMyy1J6%0v4qVp>Ug!QDI7LM=6z*qcU!%;;}lb z_r!MKZ=NY(hz7!nlPIC=JSOpc8W*)ZA&jk-x7v!0T@HoIbsjl$BTbYVdNC^ijLxFB zgdhAj2((-*GxH)gxDfIsT)tDxZxIOvmxNyd*0(Hwj9Ky?yC&nKY z3!Q{MQq;R}RhhtVC@pjuT3t;=Hv$4Y@>2JB4@M`x%(sF6blk;@tT4w-I`V$+*jdT^Ll*-4)=qgN>ku` z@VPK(!(Ko|-5VYFOXE~-9b<@MGLQ6eRW*PsBEwx!y?r`Cv1Qt`-e7TFNJ@h1nABYs zXN9XpjZ=*#RHMdi;<%G_S)p?nOj_p@l`XHEV&>KXtw*3g%A(9En;ejn=GL~oCSAjr z{_2C)>G|)a(+k1keS@j6fyKKHnP1K7JscNo29U>w2xqy*X<} z+Z6>++5~>=T$c?wp<~bJXcnwmG0@R z0*sdi0+48ZUR0|1oB?nN*pUDt-Yv~N0sWf;>InU)vgOstK0;si7lgDZgCl!BkANHT z)=QGxpS^V5tSzl^@6gk2*$f+Cu3doD=}Na5%rq3Y?8yc)S#WXfhUr#TX1(C-$+;bJ z{;N$C2XNJhxtuc#0;FbkZRyxz?pE0K+d^;vcZYYot`y}tM?I{mFeiSUGiwLT(+AhJ zr*ql?(0)CSOW2Qaq5n_@H%I07KQ7daE|1TXV2wf)Sy6GA=Ozbf3)py~bZfRtOHl=Z zn1mNvMZ(;>eaE${J-1HSZ8K6WE_W1M32JWIH%m$~D}KI9f1Yeqew=Kv6=k*yL5txQ zlQ!bocv+-VD{8sZ3fYD0Jd|}Vhi#p}3)@fvG;a1_Uhfp;j&oBKT;QUIjno6NA-CF8 zFyh~{Z-T6DOB9P_$0|7u0yqd^+xnldIB;9c2PgynW78&~?%A+>i4JA(YcJMB6~h{r zv2}hB)FelRHnlc#55gnh{(eGuCw;Om*D*4rtp$XXs$rZfvO+C4N;b+0@zeX)Ey0f) zeWMM%y4TH}&N8oT292o`#${7$07WHKhNELt{>Z_J_qoB|Pm40Sl|*3K1HkG>K@V`& zb>c=r)5~Mi8CQAPQqc_0qXMO;VH*;k@Wvj(oC6FV%PfUt8iqKmt+pmJ4wV-y8NA#; zj3=@@sG2W-{5*v$(b7W()6ie$DId!;DEIG#DZ%bw-D>b<$ZpiFfcc+N8$frO}cq0)Y7x=dU@TxiW7K2)E9pFJ6+0j zzuh}nf&R1agnkX4%h(ka(%F-T>b9z9pYt;BVqqPu9gA7GW~Y2+w-NgvjJ;EkC;_)- zS+;H4wr#sk*|u$;vTfV8ZR?b6YwGVi9epR_#zep5Yd+@wa({cRpat0$c8#7%GQ$;J z`R6D)-i#ZKq#^g%hkI*}@GrPx-qjO52h}d5;yW zged9tzD6Ih^YA}A2m(Tp&6mqVQewI#Dy4KVPU$7*fu>H=++$*Y;L4B!Zdm#&jYN`@ zlo)A8N!JXE~7n?lT_Q%s~MVnmdG7Bo09UAipxKV|PQ} z%S0meapsgWLsrxQ*weHg$Td&k7Ns^&#yITTn?ItVLEAV!9QeiEO7rB({18v`eHDug z-dV??0K$#GI8+;t4&h0p?E`u4fV3JQ^~sNL3bQUrOKt~^MhPw85TqwT0^6?t;Ls~y z58utxUf6lAsMBS-nF9KO}?=lSwsjKc6!!`ro;@yEVWt?IM$XB*C= 
zO#^K6h(XO?Sw6?kla;Wx`2yI;x@!5;nm_881Iy&P*|STG?&=>x-9r|^dh*6fY3%71 z9F%5Poo5eziS1egM7QZ9ySh-qwb{I5Y9b>uLc${iC^NK4p2{B)r`NeFq8M&Stb4ba zs~7v@`F0%QuHA)B@AoF_-7l^L|BzOSb$)}^{7j&87%r$eYByY-8^-yjCl~q2GwM)i(W+0AFh8gkgxz2N73O#|XhLC1+dE;qTo0<_%U z@$%ssYweTz)tAyeISj_xnO0-4*lC?&`^{Oy4Z_3dP4Wz4HLt=FW3|jqoCN0PfgVt} zcD-oc!P_Az%O;p8yq~AcxFyUOk-~Q}*NVep+oO}2H-FuKOC5mE6#Yr0D@!O;9C8w% zGKgIJ6Ccg*H^{31G0%2HXuNE~+G~^49{ULDI_8#i>_Wc1>_RB9#upyUH_7g$`Jup;Rd@(i8 zcOnOP@zm&W+^&DIjQ?Hm$on(?USYeT$gh%r{^?*A^X_55Z0@O^8AG?zGw+tNMc?0N zFdlR(viEZstPO@TuK`pEBQpiD{gy;e5%3lwQX=#lKG5G@xgz^Zh9QmO%n07kj@3Uz z=atQA(wI7@%8|t=9hZu13o;-evk)OGFlMq+XA~?gy0`n)+OrRrUpBmQ;#(wd*CY*# z;+=!N=O1^7^X_|g0p4Nj)k)iQ4g=k1ou-VM9%Dzbt(;ivrLyn`@stfkLDcZOFWy!Z z!qva1R|khi9jAj{;Tz!*k=6~z+q+gXyv|FyQi-ZJ>|v%CjaHWDc?YeI5vr!ntgc86 z&-eblRh}1Yp*6o=X!7@LDTVQmGqh>t#m*H8^jHVR#@H`k3kfi1*kAa0%6@P%Eww&*6e4bQ zjQN=RM1?hTNumXqz7BeDQFU9@<$9O1l%m>JT)x-0bot8mu&caFF+WDtCTZk{QN^aP zW&70haa*D?WWvhOxl4bm_H`1u8E=`YrKF-7Dx;@4w0CcC@uV})lsm+3L#&btJ1D9G zs>dRFV|ryMn2@YTS!?-Ko?7pMYR_sZ-_pC%l={04E2g=!3evLbPQhqVZRB9=v>Qvi z%V|$mz1nuknx&Tuvm)fWvv{JV07kCy+c7Og^P>#iu(zT5(jO|^=v7@d5 z5qkPq->j<5y{zVPD4iAG8&!|b0dxlSwa4xUFyWrG@sx)@;=G{ z3$sK{4vJ>0H>)Nhq-o666EHRtgqbAFZcGjxmSDs607G=BvF$er{s;3cMDJpi%%}?N z$kETS(r(8>yTE`SeGD9xbnFsx<_Pi*=4fRNI@vNzWpZ9&EASwaf!TE%EBxbMFH@=N zENhY%ROqMiDl}xD9%iIL<3HaIouE1hoF{yW2U2ZOA0lR)pV69&N_GC%xgH#~O=uAScm*;i2lRz^sNin^AS$EjiB-{tKZiC0dH~ zA(vTBp_FKfV-?QX+X^)=p|k)718!B#o8!;q8Y+Jk-prXrDBQtI&5_eNB8hevV+2_? z(hG9f<{I)#3~O}Go4UU%iXWBP&)-G6#@OfWhqac6s_sKO1g30LlRjrgv0+M&!Raa= zSEgSfJOQ>OR0zUhz$UCMA zOgmVUQ$_#;V1YO>KV#WGyn|8c50crKAcPRWC!{hG$VQd3a8*H7F_&oYr_hdJ;gru1 zTtgEUDxDyL*#isZC~q=-Ajqg=XF>)u1B9gE^D^}(mzEKdz{VvG!{H_I#4E2MB>|5l z>}cl2g63F9!VE^8vx$D#-Nu9BL6QkF2#-GR!15Iu<;8a^#W=dy3P5!-$NPX-Xg+P1I+|&*><_>>rzeE0IV7 z%-<*b19g#Am8duiC3|=XI6MV@2g0{+ob;_$H~|#w-Zhl=nK+_Ao{r!TZe0)`gQM0= z&P4{rupD3^Tnh6`qGKXvm?>qnnYUm596tR#3NTzcJ4i){J-`6_uo#Kdc}A9oFbyzG z7--+vBdmTgHjf8?XSUHr@$OX(75pV#JQ9shC8*Y=KT0tAE8~LZGN-(7JX?aJAfz*t zOn}LqFSv_E${OKF>52l~DG(cYvHg-R+TAnUk>;>l$UFi>V}Nx)*mwz5QLQHOF`S|e zY-JpIFg^IRMo4_Bj2bNLT%G}iboY%(kx^pSpvP7Zc_oewZ800~uvh)&_c_=`;M6ef zV)YC;{uaNES7IelFKJDk+hjnZp5{+Kn#jDaowG=GA+s4X;?Q$WkRaTR&Z{TRFOGvq zD$3f5QD6dtlfgS%myJBvH&_a3Ubup&fECG=!KHcgQ|Z>eY&T?m!V^rhcg2HuxWs9O zU;)6Sc+fwi{1g?Nv1p-m@W-IQy)0gN*B{^6Kqd{j%KUiU>k%J7{0BwXr9V(lzC|*i z{A707G+Yqugtqx*(D5fJX(_5#HbOtK$9mrrZZu|DL-d>mmapn6pW>of;+v9evx@_Vsnv!WJ0%7c^a%2kNw2HBAB=K1TIpmR4+ z=P>18$)@XtW~uM*&4=42eY~o$jkYD*D^FVnvijt7N)d%+je?++)Y?;oh)5wS)}_t) zo2gG@TB9osb=MkwiFJtyS^R&2==y5og)a(E#FU^?z%;qw%=x_Y0(;4K7cBFkqSBl{ z%S(Z&3o6UM->MP|KSl3Nbj|nM)Cs|M%-DPV89LnWI+z6S94Z5h3~u^eA(L>q^$D)E1xqbZgrC6GrCP*uhg{*1mKySGP>TCa>kp5eCQg z2N;oKPm>hNUUU5&P3TB9R4xdnXd#Vk{^x~Y2mN!^7r-qCISedfY+nbu9gY&S#NU~?J~vO9u{H(ZQJ z+#TdYC-M<5m(yJBjq@tn;)+{}q-JlhY6h*PrLc4PP${0bi)B9?64nA*N`R<_P#;{Q zVYd7ewi4FxhcbvWA{Hj??Rx$6+H&wTzzdt4KN$`EaX*a^G$iZp~146=-Q6V z-Wrk<5B7i0#S!M&KVK!o-B}nJ3z*5U21L}+ckMCs@NJidLq%MNP0Mv_1E|Kn{E$g; zobJdK=SSa-R1O)WQF;lHUa)a`#=@ec(7BZ)l)Z`efPns1qEpwovUJ8fi^fZkHw_cu zJ#dyXnsb0{q6B@DfQ_d2(W68j`!S;U)FnmvY0-G*`W*eJaKI@q{e0N7IM3cQ4=^4> z6;JP*w~ORMeTYg5w(0N)P(=#g6&m$@O5m)JX>x}d;(r8>w+m>>?k7mNdNsF8x`^kt zx^rz#eGni;);bfVQS;(qIP)0@uxc`LGtRe*D18S9IZAQb@{Me|`>UQG*QJzgxE4eo zImcp2!h{Ww*Ux0nKsJp*CD><)Ox4469B;vT^~!F~;jqR1iNO+CB-M;3@(ElG&Il%B zq`W+?`{Nt8Z@_C7Iw#1ub@{BKcX{J4Tcq~(7?PK)kKK2Q0&aawdZE8Ltx#H*==v|| z!B$Rn*VZr1?^L$TREhcOKJ)CGqQMVj=TWo1>!>5n>k5?Sc*NUY!)|%)p2gv zv|VMF#Gx@kw@y3P!(KUkHc$Ln&>>G|OG^1Z(O|K_)GA4s*g<#TvFFqp)m-v=GFAeU zS*hWjzO3?jsaXNKgsvrX^UAsTyv0vm$`2-T4uRcMp*jOE%2B+k3U8fbgpPYxaLJTeEpo((F%&U`F0R%gug^%=6K 
zytdF5lCsvpc@8C#<`x_(CquW&H?M-I z5NbSjP;l%FH&bJph8HDaP72~GtcgI=nsxRgiedAZ%0S( z>ZwPypul@b&D=(kBxfrR!_%{fk6){L-CZ4fvHX%d__np3G>kVJdfy31i+7Q|fg`gi zoQ-@aU1r^xbhX!JQI#K8#~sMhUrbpK0q6hTtZpZBV`WN-!#=C_88T@*E_SK-eEi&4 z(OvOsOfv^Fo+vlT<;4cR9cJy)wM9l}O`l(|pO65$blmpR;16nzT)y(T6(5Ota+&UO z%YgO1j)s#trW;?>(f!=HJzgwG;YfU~56o1Xvus*)R#4^LAKFX!G{rFAtGRWKkVc02 zXp0}8!!8nhW&eV{;5mCi$VY*g#M`M74PtJ`RCI4y?I5lxvInKpFHF*>$54@JzzR;p z_jhme%Q0Lvy)Fb?$SHs1toM8usNhB7c*3&_w?T?7ypP^1^RDJ(y_zEzxkfD;M0AvF z;<&b7ue-KJ;iBUw)fam&h*nIu1Zi1xC@VXD%2yi<-qUZa_%6jB1(ISKb{v1}`|_k; zm+)3%_>DOghpqD~qLVh@>kKKtQZ7flKLpmcFITD<-y0fC&9hN{Z7cyRghmc5^_o#{ zFU4k_ftb?q8|6{3S2@lUvj6^&?wB}FpzH`6hB^5Ddq=lTJds4>;6pGy9gE6>x<*u= zpPMo}IdX}>_UK|XFG(@Q)@pT`Tj}8`IAqc!t;OO^%U;zjToHCzFCMSm-12G=OVrvq zt8V?r1hnGA%$d>GOJGn;sK;j(NM;9m*Zg~ zq#R&`%7Gw{OP%YBBOx4PXx#_!+mP^Pyw+ zk97(Qm@luy@1?}=UEGhKphqPNHbK(X598@UIWGJH^3=oed3RCrz?wI9e9L@ISnhUt zeVmE@1L`-M9}@S!;ewFY(n2w*iSxZ67yui26qf%(*k)oNU?8wFw1nm1p%=5Tb~bUO z7qd2SHW4u~vNJZJ|36{-|HF50(!BWlyFvc0EyVya7r>&1fOFM8YGxLAqFYhbeqAHN+PoRD3ndKq%)iOnpi&feSD3rZSJrWw?HFp zcN9e%7PI(FZ7&;JuEC-eyAjh`o(MA59C$eZ?u(6G@Na_+CT5&XgHqvkC zGB?(Sxg=SYgaLETILpLx5GJkJ-^OBa-IT8%Ks++EAwZaOL<|d!Si@Pg8v>#`tBes7 z#mpYRDz&~L$(zskv^9>)J~&JHzMq#vzI11<~#{y#aJ@-|q_I6o~n7ilV=LxWLTB$oU! zi*L>ea&A;n*LV;ZK?=E~R4^q8kkd%_h>SBo4R|c>j{=i_xRETIyz+st0|K!@I`j2jlW7YOe*nIe(k5O*X2 z1>TKg)d7*<>y)yl z;K5&HvMUZ26E6`7MqyVj{?{UGqKn^GNz{(OAtU@_KiK<=; zLW21UzJ>D_AQd933a)BSi~*kF*&9KA4;fD^*^V`mh6$k-yK0=%0ydQlnP`)cael%S zN7+HjS%yrL(p3hV>&RMA)xSWZ5N#|Y$Nbg8&&b*DAqR+MeiS_&XGFy5$>ESB{5p71imO{yTzd8b#RI+D01rcU$Uh3p_lAV5EtEX3M)3hJRJK5 z?&`=IV3Oq39huW;^CK^>ikxG+MwF)Vg_{tlCxz#GL&>3rdGB4*;#qQPa{4`q}A%a|bz*(?MwkHeTFdI1LL9tP+Q(I_Vu>rr1#zzn`& zTiueG(Mny8x}lc)GPthUwoJFB=>>v(pcHSV&?9sIM+IbgD-0W%PcPMdSbl@LzFO^9qpPj(jACDef|32h-c zX3hfBaIIS(?W-mHm!~d67sa5n1!AY?zCT1$yzAHJV1^|DO#2wf;s{p6ARnVb45TP` z{c(bFuLkcTGP|+1N>Ao0!u&PmfTtE0TTT_z{m2BoTDBa3IP?nggN!ne)iizI9hGL& z>3)hZ7{PHa#}o_wyz%`QH?U_Y?|)P}Ep^n+Gr!JjHhl6E28LVm>ZRxSRs&fJO_jIk z3mH*k&}Uoig;S%zkzB9NG#t$e8Y{V;P_2!Dn=dLXmhU_{v(;}(k3!kw4t;woZGm^T z!9F~_fu)m zq8Q>xi&btK3@M`IzyUSsmuM&Ny)*!A5q3lWttk%pz{@gnQeq<|rolYbU)u&9TXgNX1eI*NPN6o+CeNXt(%~POi~7x$ZC{pe2>2r&mT#V0TQ6p7 z##+f@#fDP;EIB2}^UQDy_bU~Q*MBcji5@7u9jr*N?akPoL{uXg|@~RuNxLfAB`V zFksrT#&~G0T8=#bEz^4bcK7`B`wjYGQD)HWHo3KErsDs}_~l=IQwmrB#$y zP_Dq6e{E&^d_sUwbTgFYN|=xu!J*4x#DXKY$Od=v$qbAmpIH&?bL_(Iz`gZ5p{IDS^I;}BbgG@jXfV8rn)H_Di6WZH)kW6F zd0;C^Bz@CD4p$b-ExjqwYCc0lXN?$Ae97XJucCVC**C#CVIxY-P>c%zf=`IN2wHg0 zxnxa(F2g>gl{k{<vnjz{=F#t704i2Ry44LHAXoFfRWsD;Wyrtok^Jgztz6k{mH*dh2_mbj| zo%}B6Zm6|l1fhXvS{~mL!(qq$i<3JZ*B>?->{3!AuVMguF^h={%Wy@O&~<;DcGBE> zK@jQ;UGl_;>o9urAWvnG{N|Hu!pK+CMOD45$mslmmxa-NBxSr1WMgVs8!)&{l%dF--yN zS$%@BpC7ifb>Fr(gg#R>aCia%GSd(95<^euQqL6TJ>aFGi4X$Z4aW%f6NZja~01# zBLW;L-hceKnB_hU?{=rnWMc?V*PQL6-$a69%RkCF|kJE(){OU|CPEIqkB+Hw* zqmPst&5?#~Ps|?=MD}Tig?Ep1H%8c`yL|@+R|E*G}{vTTwgDz zuLn|;ZOBN^??vf{&rvgFp@;L$%<+qTVtNGeY>|MbKlr82OK`dOS9`qdEur5?lS1DW zCCX4GNM}oC$RrGgr;~|-wy(#6E;-t7rPz|ht5Y)ypYkK5nqu~kR*&ebQK}yb zw}Dr-%4)PON)_wYEfNTo zbHA{G)Vy0pCw?K*2xJ4z_u0)jiz#05Gx8_i@VvcT8Sj@kyUq@;}qs6bqxlG=2lr%yKHqQCKaT-g2QY?yZH zb-8SmfMKUG<`dt{7$o&X@ZBsbhvsaJ|ESbP38;TnYM}H+v>mRFoF2T2hJ;`l2sFqt z;*t2*QBOrkgqE6^`=~xq8Nci7AaQN_W9}k?n*1#mIqeKe94Gap$MoS z%6%$Q-X5?cldFQX+Sfr#re6{D)v(GwVF{PiRUZ=vc?sAJjk7XSh1L|g@~=y+SwV0^ zM4OwmsUK0{zg%iu3{STODXe(XeAi7Sz%$r&Zb-oj;cfCU@%0Tv6BFDi#YL4wXR4}2 zy0pAm%ul~AHLI@rDObMo;;8t!32?xMJlJfJ7wn~u3@ndHU6t5U)Bhirnl+0iuMr5QfyUQ9s7wpkfB@V@RXo(M zwn0T7hAgl)a*nCb0n2PZl?p4rxrz`Dr^zI@QwuR(=oNwGWW6X_9qH5&=nc!28Yg6e zEKurHN{!Wcr@{0ujkiM@K19l2DG+UGSp3U}K>~E3z9RrRL`HZ~c-^>aD9rzoHaHI< 
z22u#2;`h?dt=s~(fn?%%Nav{+3fT4cCb+pnD@c3JawHt>#<*0* z?6t62mi$|RH&PL{=bc&huQXTuc>f#OO<@}YYt$`~_ zR7+ehs@OoL%hNQJ<0+&T2#<%=ga|!F^m_+Zs9n}pkg0>vMV1c&|1*&Zv0+Uwjz_@^ zknUfFp%Bd-fE8aN20LnNVTK|!2VkAx#Q0Zo4wxfMX67Pu;o^Uxm8) z&VLJc1~N*p{*O&fFJOkO10<#bA%O#50_Tw+JC8gedSus<2sQ&@P`WQE7i2ISMTmvx zkt|SVub~2Ge4dcCWTMgfH3=IG$$uam?}SN497W0HwP5-72zI$l^@E=8a(@qg26^PK z&foBSIMcc51&9-yAi)ARlv88)&(Z^d!)KsKZtX8t!OP|BH+1PaCt=e^JQxKwUj zW*!Q3j0IfdBG(1NX_~Fyz^6hu>Kx5=0^qV!G)_gRf#rZktEc#^pH^y5{wr(Pyo>sc zGgeLl#o6nAkiPJxM`n2y<8*rEC~9U*DMfwo4sp5q=dp?=Bo}hs1`LBt<2xlwM7lEH zTQYbou%QL!bmSVCyy#@WR7*`podhxF=9W6LbIJ2c*c-qZt%?8EJM2AwNItwA{#6j6EC!u69n0#r54t^C8v#E_P+68(*^0Vw$ zj>?8uXzZ1)zt$rz=)0=|bmhBjH2H#M3xjVjC@s(DfeoKyacUYVw1I00>ohcwVI zMf+^f*`ZyB1(YbHaZTqh+m=5`EK5-@%t#t1^qI{Zrid2_8zy(A^EN9caUB}<6d~6* zf%u|?LkOS%zql_!KL;-DRNydCE#(iD~A^$XN$FiRD|SlgP0%--FW>{rv)Hj2{2Z8k{k8BzmVj;oPH zzHGjLjx^d~lg;J}59;p@?QY9XdPY?r6Cxe9G<>D$5*2JZZ3~h4BPi^qf&iWEhH&E{ zlAuOeo{cJcbo5KyK{)#N;po989FarolHg(2zsKt2CChwH#GIf-eDQL;boeui1N@P( zBH2YmL|l!%*=MJy{c24=pOy(mU&FU1I^p(Q9SHtT!WsNwj1}&mSGdeX%mvviikybn z#p0&Q!&6|w;p;4`)oog~y3;9!aycCvE~IqE9eXfHshM(*1e_j}x;KtfuB2bnV=aeR z>ris1Hoa%}VbDcf>hKeWn?aj6v9|0tZaN<(N^cQTSl}JDi3>0=o*xp@niY;mBMP4o z@oU;qLr7e9A5$96K3+Q|v8yNB+zsN+H#2W<&981$Ml`A}RA5+&;W~tZi+XNX&l%+i zaM74Mxnddk&>qYQSzSDSJ3JR)Eoib4=?Yya z^sZMmsIx23Doc)czPnC*>CrQ*MKU>XS#bsuV_B=Cjc?5wGx6evUfubB8i((Nx-5q; zQCT_Fi&QSy3rXlV zSwpi;=H8Z~PD}T%+;&u+s>fX3Rp2?#Y+tf+8`gJdwyFStf$yUgTy;SYPtoH>a zLEY+(LwbRODd`c@0}tv&d2u{xnP}UTxoOsE__?6hhd!EK)w=}og6>;if8Pl( znY#^-NRNv)8yFte-y)wJ>aet4b91^vhTX(rv1YNF;X!NNW;e?SfWSTByW*}m1qkDQ z3->m;-(78C{48$m?X`I+w|U8(Z~53=X~~?m*qlCTx;kcWzH-Kz&~>%xTzxI#l#^V? z$z?mye|vp>MEvIQc>QvVc`a1kq6O2lXG8deQ>=i;YFJ5obKATEk|^Uy8ZMLXKj)9u~MX_6Inb$`}Su0QQx#{ z+`*@Z6TYbD%ObQ~`*hw~m0&k_65db<-89UkHfZS?6D}%L4A|&Dm3UG#0&h)oP&g)| zOnk2|$wb0otMT$U+udrK8Z3Danh%Gwk=a~_BDR8dZ#20hn+-B3o&cQkrVNY(x__OE zQ*WfL2)QOF{mO`{$QByBrroCU?srS3?tt8)X$+AZ>cLBtWjVnM7>iNmvjp<1 zytFSoGb3|CsDF*;#<`|$e=j%S?C9SYqr0hT!5^9T?05}Yi=M0=2W&cj#;wUJXhm^O z6gKyc_c1+1uYZJI-)lTuvpKaKQBV5kAg#{Vb2TqYPtA;>Y1IC_-zUaT5?G;%LZ&QSKY^}Pg=*EMQ+yOj zGRe(O+@W8>mqieCJOASZ*+f)y@o1Lvx(7I@Na2gv8#ntmbKDOU1HdYT{OEs}+?oHc zt}I#EIhpB|T@0N)>`mxZejkj&|9z0R)_~MNS#8KMxV)Y}MJTnAC~3wvDv0>k1E8o9 zTow+kPYlsk4>?9gMFo^V@DB?W7Rt5_kSeNRj0ng{;UG}@&pZZlq2ySUY5Tm&hvog7 zoKeEf&doMF#~iP!%lXn< z&N?ABd(WYs8Z)nR$AYa+-zr{0=PzrSEzQN%)5kE<%y8lUFzszVDtib1r_Z7;voe!e zXB(`!HfVE|@akgj*+t~ze4(d<%R_`DRFq3rugBcM_>+kICir&fuxRMeBZ`<9jDyo- zXqfZODg~YMO`I-LMlRp($5pYkRF6v-TxV2{OR&^w>1@Z;<%Z*2Z2fHH-1&f}W1VZ; z*^?tXdnWIqmiQgx%guG%75wrV>k>8y1{pR#6g9Lul%M+E0pw{)Ev)J`R%N|V*f*yyA z%b4@e?-bBBbl0biyDP<#o`+(&LeDxVu{Jxa+P!=s07*+SngGfol%~-%g}q5Oh>7_itr`H9^B*oB=8aZJ*;;~U+&c}X)c8Og4xFK zIc%n8#)hB5ENBfzXLh^&945e~mB|#H@>_kBz_>It%5JCXeSKg zCvYw(2F&K$uOgo<(z-8e5P^US~^&UL5rF$CxPf5BU$2fDUa_a4tR{oACz8jc0Esd&Z1z zVv>u{Wx*-uzC)N?4h`2IpL2cdq@f>3BkS$p6bLG88IxIsbulXJTRmgFJ}|=SaW)k2 z6L7|oWMjI3eKiez*4B7hkc2$w+zMS+f>*Ls@2b$qOfDb`xpu>_oliaE_?m zdILX%iyq`r624|{vcc82PYN2%q`16rX5ooi-P%fy9%;5GpY)nADMN)pT~~CVwO9rG z@R@GfwG*UgK6VXDL*d?M5XhZo|FfZ}lHB4|sJ9`2)qJH_R)j2mpl*~tG;k)_o7~d~ zGU%R*2&FP;Ro<6v(oq>!rO zlkiEBWS39(*kADH1VJ79M-+rjb0t3yjyjR9+LlzTD**zGF2<-V5Y3#i#9BoU^G=cE zkN%~=!U*1Ko22u?Youh$z8EBoKFN=ni^b} z)0xrsBKu%mmy@H2p^6LgXqDMzSTk0TpAcY`FuS8=QDBUggmG#h3{l{^s4zBh9t* zmikiP&HD8^eWbRG!_V2eo0}U4b^$)R?DChf-CG)%2>VWtz*@0KUO=;>NaY??>!ngZ z%zoK`5fY4?`23_7KT#^Vevb}YJ!4}>&g7aSR#v9Y?$V#o&dTcka)eogm6%0X7rBrH z^|M`il7CV&AcKO*l_yV5q(2w$x0l24wQ53J^NUx_dv%o~PqO7wUj1k_Ir+TnOopD~ z=Tw0{u-!I|zst!{pb~*kJVs!qKlV>rs!84C!`>C=x7`)&o#gLfuheh18*^P%n_Gf? 
zNYa^R0^OLc^Xg8`Lzp~~a|e9(S0e4cu!;xk3Au$uSfX9d_)oX=PaVG<6_v|npwHUI zgA!siA?rEoD;wBrkbu8x8%C+w?JASuMZ&;fG6ulmm3RRoxpVyH0Nt5xdQxH?-`RO_5SvvytW{bGER;dr>_43BuVRF}IxO1?71ON=i|lfyDq0O|l2(9RORUCj`^Mb;nOdiSd? zbn-ugavfb{01u5JX3)OtGhj$Puu%xvhLmZgRl-{v5Tm^0CwrXBWn*&;<66th0KIJ` zrLJzNV}wFMa4vEU{V_4)mod}|)2#SkEI zp0u;EBV5CQ;Kq81*H3YTCuWE!vO|thy|p?c@LRq#uO8HYSrt`_dLlfZlhO*;Sz@p! zWNQSfX<=|)){0P-C3yz}D$Rg7w_(jC3+x8!fBpSNLc&QWEufK`l7~Yp&VYKose`84 zqgqTAG^#7tgwH*U9n>24`MI816n{}K{*!2h124a}eL zKvOqB1J+zR!Z?!FUX$=#G$w)hhsQ^g>tkyjB)z=X`qRP@+{FF}2L3A3zpf#d4s|H9$i$L0;af#I#kX#)+NKT4{+TxUb`*y3VZp>bO5 zYI<*N;Csbhv;K+?bW#=*rUh_|d<4I#0vB7k4gPs|lvfU%dWVSw-$ z7C&mgWG>mc2M=*L4-8MPAa|9mh#O*O#wTnB3H>t>DkbFf0BmopU31Ky8~1{{?t5P( z>Y&j^<#D7$$zVHS>4%6pRtU5b`nWgPRBx>-feoD@8S26=DWNDG(O!}6Ihk6Co~P25 z=1X`M?aXSqWwI0zBtGQ~nT#G!c~_%}s7pZyjV(&`8Hv!}0aR2VP+?bZDN%caiW zbsx{>n>`eYB2-r&ZrAWh&K_FC0wg4s|#!x1N)9n91V2OE=NSH5Ve~)qR^6 zlpN{r23P-d_gaQ3^@2_Q;H=u|t6P5+@m{%)o0IBnWfnlA^`-+_KI2nSxs20_)LhPS z82R%`WPFCeqsj+W%*LXtf*B?KEm+vIkr97%ESO7+5sH)e?ek|)Fw0T{n*J7qshcHF|xvHVV2 zEZ!yaWAH%933M9Tinpm$8kxfGxu$Qk7Zldqw3mfrCLp0Vlk;xZVTB`fx+yjo?tW;p ztRt(ra0YGMejZ|sf~AI&vWQ6k^MY=RW`twwhoeO*bj*kAQu7K2dfszwrT7elJEiQS2 z{{vh9Gr#e#iKv{(&>9xqVtRm4o?G$l`iAyer85cs`yC`>V;m60xkh zURiOtisvnEF3S325W*$|{_fcf`499oYAW^S=`lAUi>hDuGN0%0l|{R!5{Q?h=bNk5 zP<)x>lq&FGbq@2f;ot2}SPromY*XOZgSdJxU`zc^7?KUmt#;!#SpdE{hI9>-G(;oG z`u2@5BKW+vm51X4wf-zy@hm@SY-yRAP#bZhkn3B-DI?x4Uhqwj0Y=)UZPH5454^ZV z;kzXCaw?p0ktgmK>givnwy1t&SFmsjxXU1*7!WM&c(hDx+OS#G1^wuKHDM-LvDv$W z_q^= zoeksl3b?#!FCq+kj-G8N0;#n#F08(X{=tGsGz+95s8?(+-;1}(zHkEEv+OETx+bJk zK}E*a^}K5pJ*Z5hQ5#tF!a3XQ3r9|Jo38|ul)fAmW^FI3iU@t2CLx@!<*{YkGLUDE z@tKk)bTf_SrCOqg@b)YKEy~5NvJGWpEY9x$d^a9b!!rq~Z84&Qt(O*Ln0Ch#fiFqS zPDkLtfAtR_H`|+L66t_b6C-vh{3!Vs-KL&aOQFom&*G98*L>x(UPd*%uiqC@`~k|M z{KehG&EQ?#kJqK5hdrUwf3B4#a9Mm0aap+~0)di}?KTYJMgHt7U#o0~oTJ@U2Bt?w zTiDi8I#PQ;J)2UrnX9Gb*K|ui}OAs*-Bq~u*M1m3oL^4QFK%xS28#sFKygu(a-};_) z?>~pdn(nUJd)KbryQ-_Y_pb{3{6^wi$tA;2P}@=M0-^*;?~tPzoqc{_(6I_TJ*myi z#?nAsK6@vqD&us@>z2C(2CdpRHN=yj2x)9wxWki_%yZ1;wIj@+XRPzGIy!lAWsL3*YT< z-U_2IvXX6xpH(z8KW6`x*wO?`S|==xo`<|2U2{FRR{S-6 zQADA-o%U)$NCoLkrV@+o8q2|zr_|XwZKtzpJ1j3CE|m6@t@5;0Sz0m(F5BjN!pJQ*@<%k^l8y29-kmDWSBbj#UG%qk~`p(O{tTUZv!xOh)UjMlQ# zl|GMby{>pe8y0*xTGlA^0o!{!_7c`wF39CkVAYR`q&FH1L*Hj_*?uJvU%Lx*(z18U zGJGlRJUnz`Tt6KY70YQ(=aXn19Sgtx0xXy|ZP{ zmzQ;(v0;iN-=A8~9k?RQ*8J490{4)ku%P8l&EpQ%hOCRpU#^c`2)iG3jN?U?B>UoN z>eID4<)J5K&&ZQr@!$qUL@Kh_H+`*bTl&&eLQ8esUV2DA?YGflnDB(x@1IzBb%koKMcJ4}pEg4NaT}JNf z@K1NaXUgv;1~AQabbR3>Bs+$EiMR-r_P+v;Hu@xDQkGww&qu-eBJ`-~vQgzU^Ejm$ zsdi%6u;D_Xgpa4JWyw^7x(HHPQ{2{Ru0DIcED|3x`6;2#poFQeozuC_2ErZ)x{bTr zL+lBS9`=;O6seuDdW{~Q@F;nI-qxDs99BXIqx+78*61w&ADyH&b6?)Sn8AAijo6mdn8id#wpZdcx1hYIN69Qc@$ zYXQ0$?U=41zV^!CX~Ifh^#>(Vh*I-ug__zxpBl+`C)OS;(*!gcod|kKVQe0E#&nLY zd1mQRYE-3@sQ8QT$^3K=EFC2+sy{E>g1;GL2r?6uTX4y?4qcR*CAf< z_C&GQa!L05;`dfx(|vF?Ei001dRrwf8$%#6%=J$9zRtKy-VLoNursDkh;fy;wkKCKcs&u7KE5X1z z3{9C-H-GoqTkg+*l^-||87aumcifhGx;yW=!Fp&S0)-$V{(Rl7aLUIMfpkL?K~88F zjE5rsTxB&s2!m4Ow~*3<=y|H5T`^jIc(jS1zA4hr4Jn7>Kcz&g;3Mzj?&%Il1NpdP zJqYqXiu?!^&Iv6KoNp(C`9V7*L^nl# z5f%CQw+s6Dw_7vMJq39XBz9^kfyB9kfebKI0uKHu8DLZZ66Fo?I1~wq#w#f!@i-?0 z5u}D85HVOR8V{0_fJi_<$1N}(D4aJzL}6RVpY(r}t^nRvX^)CK_1~8Mm-N3=9k@k5 zZS;%kJ_t`hb;z%(1Jv@Wcr=2D!<*u8SS1ZS!X2%La0Aqa0$NKz6~KQb`O{nq+gW-z z6vo+C4ZtKR!5~nWI7Ct$3O9v7z}{qdkZO!2Hl(&5uILJLB-~ z2mnp%>50W45!=qW?H>x@KeP7K7Dyywh*1J$W@8lC*7aZ8aqlU|D$x8Z8BtmWGN$;BvC!l5jXm z+zAPF5|=^BqMXrA&M-I#n!F$F1 zTgYB({O$R_=K5O`?IrlvTzjqYx99(w>u*i8m*8J>?X||=p8spEzctZbg8$82w0}-X z(H_7&)Ek&^{%+0-k^OVhyFJqa2B+>IxYTw8nDNhvE(pFoH8uw819KSy2oBpub{K;V 
zJkcJ?$n62P4IHvdpn)OUfelDREXHG-a3_$=7;K1eK?8FYG#D7W{>a=(1V|vevbN*L zz^cH&84G;bQeps9-d>{t4u$@y-G5%K0T@;NIThE6%re!x$_k}_+V*h%TvVr|KCkfb zi?D0gPVW;vcbYdXNnNa@GNZC)aI$xi+}j${2!4FH+Qo(O;VD>PFt0stc>Thyh$)|w zo$Wm88w1P}pey$%-C5Re9(}WVwBeT6VS{=;4sZGId`ef3cu9?&>-%IfwBXej4;m!} z4W!ObkE+cL9UC=R>{uu?3BgseP0QM)?(d4t5)1fth%mTLOI1aTt^_y9)aQ@RBONO; zm1V=j>dw$H#OpBSb);}b^4R4JzlB$57L?>L!$pMq>Q@e9LspWM)%U+j8peh7D;Hk4 zp{2vb#rW9B#Ag0={j2b|ZLf%1YT>0ax?uYc^mCcgit zmZv`V5Engz%0Ad-=gbU5K9K$9xtL4z%VJK<=b$aWXG$C8aJPvyqr=$^d_5wq^vpe| zXZqXcJq7jtoo^RVH>#P83l+OQ>35UR{SuFdddm{;AP5m>b?%}Gqt_D2yLv;D50R0}?20Q8f%077=O=0=HZTI&F8k+Bm>oO3fXdg zGX|;t7*DAt4%(i04AX5M0iA)_unV9IeOJ!Vv#f@&=usM*)xB@5NawBiGII9N>u9^F z4R3Ok8b(J@R;%UJLrRLs0G?OG6YRq1(c)7ewseX}ml4X-B`=g&SOJ!)X zHBD*j#O3eQ5HiwTY4zVVYbflWG#qIunY|4MYYI(39i?trXR~(<2;1iutyo_r)1mul z)!+&3@+w6#(d3BQaoST=D<9SKi;a}-Kj0_zX64BhK59s#`-$u1)pco>%q|=3k{(;)WrVehGjHI*`$t>ezzGlainMPSFe~=Y zau%~v!0&<4pBaQ{5o}_qq%$cNrG@t{%pwC>xN#r5*u7ecIiqZbdbni1+>(5Mxmkn3 zjNiQxN@708>{M{6t!c&#Ii}sc&rj->G(L+5&)B4JVt)g=w=J5fEtKY+p!~h8t85xw zQhfI~b9tG%Tp?o(1=A)HT_2@C=z?`?CEgzY@M$)-^KrnXBQZrszw>PdOF)^DO_zrYqw8+6eD>d<@<@p-n`?#0( zmrCA<^gb#$;bN`47#AM>mNh~gY%$t6d#okKadEZlu$VI^WAyQLjIkjEKJR6J+2h1> zmJhG)e5XF?)j5>(^n=P;!Tz?1QbykA4afF_oTra!J4L8GUTWcm^%E{0QB$f> z;3*9-m7+GNL_r(#RaKkbXhOK*~$ZIyvSw8heO` zv131rQohEiHN6w{)%7m!prms1^2`UZ@fJ+auE>k)7$_W7sh%y|QWJQLJQpF)FQ@LV zNA4aLXK^vcDDd*xAhHiv`i#Dn&zRkW(|q_4W&6CtxbT4zCG*?;*DBD@D=RE(Pk=Z^ zAdVTAF4-8-V@Ei}4WxTLgB*e-I*+cKKWjawS;b@uN$F+W=*XvPW3;1s_DJ_39paGB z;Q3DUBm7}*caWOwwKOmDviY}W``V%JB%e~m<|1=Sxa^I0(R*A z=WZ?3DMWwgn+Xrww_iMMm3p&7+p3LJ-_6bRESGAfI~q5twSk77oDJ`>ckm@&FpT=f zUmItCujPc)-7MiE+Yoa*Q%rkFmjLHOhV#ve>aZ5`qeXs`U3C^}myd)fUlqOv!TSzU zwQ?uag`csy24#OkV}m61y{Cole|-iDX|O$HCuXK9ofZP3?mGJpt}H_&`)0OJi!5iE z$;U2@i%MDlW4-2`;`d&StryiBnlFwH+fE#ng1drSgt-syHU6Do;fy#o~}k%CrQMjV&6`c4BOll~-n z;J3d2kHYe$WgvTfKfu)J!FvexMw>`LXvw21wY-@w5vI$?)y&ncDSG`k&+gqbZ+6}% zy`MgMT3)9_syVmds&SX|6iHXCzTC2gu zXCrQ!_aq74TfXFNCZ^|@;#;m&$8|V;>ICVEk-cL(&XW(k@vy1;Z0XVxMRt2han7ZaQiV*1YAvaO!Ao?i?C4@Z@XK$30jHiFKBe<2sTg*5 z^cp+7eE}czWqp!)?5%l|$&I#-+YU7o7qhN=sh=YYYa3>5!${h!1_-gn7$Bxwv+i_fA79!xjAdw<1>v9-glEL z`|!Vvg5y50>JQTKOi!S;hGH350uC55Ys`dEu<%q=b>=iYS)WB_7PzujvZ!aRoL0+2 zu8m%9cXOzhC=qLHIjEcfr+JqVqfFNvmptcd_#CIZ@tQ~XaN#qMD1*C;c~tuIw5xW^ zt`s#_?oHQqEQ)(Qr{Ry%a`Ti+3%0nps94fi=W!VkJ0D>FqQl{H0{WSMBbSxoxlH$Q zqA2NcZGx`+K@a7p!S&fw(;S8=>eGE;>yIAJ+(y`+1n#k&d6~AxL7FqAt4}s%9SSIg zJKCi?d6G$VG$%!R41~JFg6fUU`0l&54hcS{u*$BLcJDv^IxI(aJ$Pk!!=EPIJ&b&9 z@F4e=t;+aKpo5osbnJI)p~In4|7hUyYX`4u3Oxuo`Rpcb;LsuReUUC>3^LcX&xw55 z5~6!|Q=Df)3%rAV@^dokr* zqCYYmf3i3DakrLG1o?^i=kxqu(^tOmO;{zo^M7N*qkc5pTt-Dge2__v-FUN(^Gxb9 zA;kvk!NhnK$;yT=aOaM~f$*{QXF@fT-Sp#=9 zz>L#p?$_lJkNQTvSSk2`(5u%n7hzI!tK*=1OAqwZXl3|>>!+F*w;vPdHN?Jztmp~~ zVRZt(E8E9cD88OF>RvGI~va&N<~C^!T+f7`)J z|0Ca*-Ltu!R;1|-oz{K`{e@MfixSMrOiEWf;1tIzUQ~bFl(~~WOQueynJN;gdTy*| z8qrbQk4>E+X;sYik_laa-g%@^Jn-Db-{E2J!2N<0o&NkBiD-tlXI}5?SG$g_G8DPV zH!88V71;7b$yK(Ayr(XX?U}#*>Y7)<#RYuRi7C;RXLfnO?g^crq2_INMOs=17*fHX zhPVk)mJ^5EzHCMpX*~8?iH&>_Gvw2Fr+QL}->#TcPkXPw;PRB6rb8&#{f7wF3r<=* z%l;0*54tAXClqb_+HM|vbxKCPZTJOud*T2`Nu*_RX5D-Lp(D98>M&6@|Iwk+&s6Pf z8%;8ePS?i{2)SH--k6q*WRW>bH4paBW+uv=Hc0Jwe5Zy+nHWyJ)c1l)UqosFG_$c` zu{`-LaI@n36Dt>3GHum`lw@Vbq;-?I;0K|8&L%mZDA-2Z>nLgS zR+uNa@t<*>m6sqG4S)5UFKJ7#%V%sk@A1qUji}VWIcdX2YeohQ#MGfQ|*8wXBJz6YqOR+h3nKI8T&ZHwN-0)oLT-LCn$J;}XVeU%XOdKP~l&93k?$ zkLpOqzT=F&J6B}#T77!IN@`2d<+F!buvQ`z4lj%fhpsJr40IqaP|1AnV7SX@GZ=CG zQfa=Z(Ff`N$ynA`~W&EB=c3M~0x?UZA9$#?Ca zA*b}_IHMYU8e$y z$Mo$JqF3qZ?e4z3{q?2G{QG1<*H`2PQ|viXQO$3TyS!MUiI}31KiZ5#IYvBSEdDq? 
z3)9LL;GxkSrKG61^>hgHCNJFN$-&NHvQ_#GU~Ck`DIR}HT&eQD(GUn~?He$=dZa1O zbg<;M>GOvq8n9`yw}H{eJe|4ZBJp#^GI%Ce8^inY($BBomU=vWG5GC+`E_+pAEng= zS1zU8uA(ni)^~ZC>D%qtDU$`t#qSEncSLpz!1Xaw=)ByB69n$Z*0D#16~D5`79FA} zHHvoQ)J6%6k6wBNnp||%O4l!PQC$GwJdSTRj{YTu*;#x;3Hb$=w(G z3@$zldO6XXs2+2)1bx$=jEis8{`-bz%etp-;<4u3(9O*P(DhKTTSY;-W2KfunuCZ# z>YEJzaL25L!^Tth-1|X~w!A;StJtYM zTM7nIH{-xed)4Aved<7cqqHl)+9%ous+N&0nY5zmw-+^K-=a@Q248gXHdt>KnGgJ| zQ$&_hmky>$RQZ&VMb98ZqCCcax%-R27#rn@ErHvYwlB|Qmq6&7J3x9}GS=&P#rGoBdUe?Gn(6J9yG+?91F=J|aDtNZ7tVRl_KH{x#H za}vJH`PgUh(~OtZ2Fn;^wO-OhP2Lg3s3i(}<>Rsd&Q7_U_CDjJd99RB#O)*d&ya?) z-`||SD_+U%F+Eu^fbAibhF^Wn2aC90r#a6FBx#qf2*5yS86%%Xno~ zZu2enzj< z$$j9NNPkE08n}k6iA6f+x30e@`VNR*LE}Au2q-WRK!ZV|jWt#Nm&KZ4QgSk2ebAm) zKCT~bCwQBh_Hcx^J@xiWe+_xV<#$~I60#%#I!s}7$eHp`Gg&>o9yTKPv>FQwx%#!V z5c14)t@{y9M~~+^GHEdzIzggxb5R=IX99eE=f=lZo1PB#xy%t(d;L>OB*+8|j!1wH zB}^cfl13x|dYtiHTX!&iR&X11)TPjLS1Mu8|UjmSjs8N&v=Q;f~n-PAyi zUZa*-6rWAQ$ZeC}mssd(gpdEFYsZ`#G#d_Z+3IIU;4WLjwQsesFW8*J!om|7$)W_F z(8=ESVM&g-)xg9^WLdzId1jS?Whdx3JPRpCRLirnoEOW_E$pk1(7Eby*8O#wKOS~n zc3)`vsq67C#)_sSL|*3U+DnsNcKLpS>zw;k2ER>`wT(4*5?5l7QSMg7nvCG*PyA15 z*WSY;x>v(LJ=`#!CUb0wI$JcuWYZEvCjVp~$a{+^dEzLUfO?=?^}=&s*Ad@HPHJQK z=qqqI{7M;bLTp)D^-Qq*21DS2;;{v<>YL%8nie zG;GPvKU}orei1o7FQ;Inmh0MFRwZjx>5F|3l3bTb@ZHLb+=9#+Sl@PwJ-(a@EV@Lk zHITM%Y*HyCgPHuGui^XU!S#vrOb14iIkV*-%dVy#*j#_$8MzkQdS#t=^57v|7n-kJ zm)%C(Xxc-_1jyN6>}O9SCx#w{>^HCBkliQqXy00>BQs<7K8^^gmmy3rWr=t6w5M6# z9asrFW4M1>`IsxY@M$Vp28obYL-J)bpF^k|DWcC$I#O1VFEUdgj>6PJ$Bvq5(ZNDm zK$Iw^a%Ck$5F4X9uu?SBL6!1$j$CTdi=ye)xh&r}WW-wBv zXQ3PkTRY04b)rsF8gx-h&N#dnWTyqj({U+h$9f;9j|$7kq&IqU+`yDN>f#wy_tWf| zqDGoVpK|#+#XFCmrCg@b(-2E%XqR>cFNqR)Ukv^u15s%{UW|b~^BG-)ztVWNujic^V&#K*+zOJ-BrT=%1&L?_eR2sunV zYEe&w(yDZ%l^I&z5WnHqEZ8i|uG*FEXT+MT-igcI?BufI>O~i(7g`mX6}q4_U8(hw zC)9N^;@kDxFS*X$JPtl*U1wE@)P5CLD@edc;bZW;jLZQzN(5C zzX-n+zx;2<7{Vx3kC|ucX4)V%Ow8U|z%7cb7N2q0UruL#SYnWG;ihk+c~aLu*(-b{ zE9Q30P5l`CqE=PA!UyL)l05Ewc2|t%Qw1B%g?!Kpv>a49WnX<}>k>KE5mO^2>j-(o z_O#Pa1cw=TgHVl2zmB+sC>qXXdHtg4cw27$$BU1%&s|vX7_}cY5aJNh7Frf!(naf9 z+?q)AO8kB{8`lWShgG`kOMA-|ERo=YFB0UeE!>g((E%Us6HcuOZp?@MNtu71beC)6k1t1r=G)oNv59nhjb z5QDLBlW{UAz53)7>eNWuL=7>Q;4_jt{K5vL*m@~357|g(ZUbo zRbnI4!`2`B=7dH$M$D&H-q{T(wm$6*Y^<+C-f8kRD9$OJ^k_qX(clD&%$^q7eD)cO zC7SMb6tsHyzCM6y=Dp7 z4Rt|QIo(4#TT0vyyg!N6^y2c5LNo)W~PiWL{?0x=fqQX|;XUMG>gNw6uZt6nnt;tjarb?CwC29{$1E0`q$xj1;{?0v+emMr#we$CEOWR>RA}KX?0eKIy^q#2;zoZI#_9Rw{nB z>ekqEg@Kkqo#DvU`Iu;428F)2u3himWp$dqp_idc&WTaytNeOpM&^$ACGnDi;_i&m zp|bh4B3x$mVfo>}jCFxg)GKf6L9@6ES^~cdu7TAZ}?h z_U+l6{MHvKB62#i$-!R3wyov8i+Phd;0C)xGcnUap69xr_bZYsZr-c{+m+7-eRX6TX^vlw=j++bT}94B zjK4@U>gjkUtZcv+{kSn@>&ExD;R_o%V>#oj_gJr7cfOvJ^FGHz$z~I~;XA$fbt)|N zZt#Pk^!1TS2j1YRWGgB7_qZ+maL4-gPx z00JVs4FM7UPY@8{|DO;LDG3+=0D(Y3(r^hVTuu@$w+#S+LcqX31TGGR$pZioDaenA zp>2f4e*^yU_xOoFz$|}@o7jn8`bqpxLbvyD{3B?<9(n)+@cYnkAY&JSvQ7Le07H>~ z+n(D942X;=L{1(qB`>vo4j?dgKrY+Ve?l$(3H4rN#Xq3`CB9sdek-v5(s%mBAn3>=nnr+zyyFw`V)Y#4W`)1297^LH$UJ>h5)ii7vqFS;C;7I zI)7k}eg#?l1zfRn`JW&as`9o!V)F?gga-;_f+i9H-~_?eo#=^1;0ZWeJzvmI5?jnR z1j7OC?t}&mgmM6a$#4<`FBiXG0SdcUKOhpnf+KeGeuWL~rv1MOvH#I}e|OyR?%!qn z-%9&efqyOVuLb_Kz`qvw*8=}q;4dNf!rt2hd|KMy%`;Kq=P)+$bSPPBy0>1s5 zIs-p0AQ7;dF|a+Mx(Bc&Au!khPJwY1Fv$6l0L15Se>62#QUYt>JcvO44nJIKJ9fqx zY>iNJ0!X%l2>_C9A`BXfBG}mN39R1{@IwKNvojix25^c58<-^62@il)!AJyvK?Nf* zcmNscj79r^Q8*$3i3C8BU{{id3j$Aa$0A5XFfhXMK)ZqQfSzCgo{d5SSOM@k5{`%_ z07(FLRZbf0f=76vK~R`1*og!HxQSpC!o>yHcyafDa>4?DUM$9wfFXdAc60o84j`yB1guWN&rL*H%Pt9DxKF41>diiLOB2&npCy1blpPCxM`{@NJV~ zaH#EDzatHaMgcH<1Q@vQKj{H|+z|vMX&d(e@W^gwo+Baf+uLgbr>+RB^KJ=$BoaVS zIT%=Z$1%XlyQL}baW>^0R{|^lth(}!gDI%dEbKS^X!M_AZU~E{*mc 
z8ttDn>O@zt{%&ywyW|GDk0pzA>IyU$DXM%{AL)HQQx1+rw)1lgDZ| z)6^A*_W%R_?+*B=6P5r*?D8UZvk`lE0dO_YqQHosA`m-fKmg72hZ%r-3N$V-dY2i! zhZ(p{e>fy&mj<&-gV{ra`ALHY%;2$G9B!8!w@Z%OLyr4{;3pRf;{^cG!MI%`kaihK zyNskgjHI6&zPp(~8wHHu`y<(IcVUem9o(LdN@h#D70qo69IDhXGUR8^A!LS$8B)Mb?AROBEkvN9^la9IF& zt)!qPBL#yfYba|-%Sl6JB~?_^)iq#JlG3uWQgSeLb!jC#u(pRY4gf9f^sPTPiGkm zS&`tD3@4pti;j0hh=j$fXSmC=oDc|-crJxz7-OmM$J;oj5W@s zNou$cFFMnj2BOBluv?h_^1jd;PF0jcmUiUEzUa(15X-KH89S)9y6;A=w!%}X#tdGb zAVI!1Zlp;{t4u_=MI*)WhYiyWT{KAfLl$i8QN zTpS6u*%m&l2xfiC5U&W1XMN*_m{v@M?<&cm&X z;_0TIu%gQJ#=(tZCOauyAZO~b=Yx*_Z%=F&L>k5s<`{P`09b#dm0XNLO_{IZOIVl!&hwFCS9pUop<9t4>j$|9I i_`=^ji~)!~p6G)|J2TMAK;Tdr23kQubps6s+W!N?fB_Kz literal 0 HcmV?d00001 diff --git a/examples/l2fwd-vf/Makefile b/examples/l2fwd-vf/Makefile new file mode 100644 index 0000000000..39ed08ba53 --- /dev/null +++ b/examples/l2fwd-vf/Makefile @@ -0,0 +1,53 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = l2fwd-vf + +# all source are stored in SRCS-y +SRCS-y := main.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +include $(RTE_SDK)/mk/rte.extapp.mk + diff --git a/examples/l2fwd-vf/main.c b/examples/l2fwd-vf/main.c new file mode 100644 index 0000000000..836e85ace7 --- /dev/null +++ b/examples/l2fwd-vf/main.c @@ -0,0 +1,708 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1 + +#define L2FWD_MAX_PORTS 32 + +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define NB_MBUF 8192 + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ +#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ +#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */ + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */ +#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ +#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. 
*/ + +#define MAX_PKT_BURST 32 +#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */ + +#define SOCKET0 0 + +/* + * Configurable number of RX/TX ring descriptors + */ +#define RTE_TEST_RX_DESC_DEFAULT 128 +#define RTE_TEST_TX_DESC_DEFAULT 512 +static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; +static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; + +/* ethernet addresses of ports */ +static struct ether_addr l2fwd_ports_eth_addr[L2FWD_MAX_PORTS]; + +/* mask of enabled ports */ +static uint32_t l2fwd_enabled_port_mask = 0; + +/* list of enabled ports */ +static uint32_t l2fwd_dst_ports[L2FWD_MAX_PORTS]; + +static unsigned int l2fwd_rx_queue_per_lcore = 1; + +#define MAX_PKT_BURST 32 +struct mbuf_table { + unsigned len; + struct rte_mbuf *m_table[MAX_PKT_BURST]; +}; + +#define MAX_RX_QUEUE_PER_LCORE 16 + +/* Each VF(port) has one Rx/Tx queue (with queueid: 0) */ +struct lcore_queue_conf { + + unsigned n_rx_queue; + unsigned rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + unsigned tx_queue_id; + struct mbuf_table tx_mbufs[L2FWD_MAX_PORTS]; + +} __rte_cache_aligned; +struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; + +static const struct rte_eth_conf port_conf = { + .rxmode = { + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 0, /**< IP checksum offload disabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 1, /**< CRC stripped by hardware */ + }, + .txmode = { + }, +}; + +static const struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, + }, +}; + +static const struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +struct rte_mempool * l2fwd_pktmbuf_pool = NULL; + +/* Per-port statistics struct */ +struct l2fwd_port_statistics { + uint64_t tx; + uint64_t rx; + uint64_t dropped; +} __rte_cache_aligned; +struct l2fwd_port_statistics port_statistics[L2FWD_MAX_PORTS]; + +/* A tsc-based timer responsible for triggering statistics printout */ +#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */ +#define MAX_TIMER_PERIOD 86400 /* 1 day max */ +static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */ + +/* Print out statistics on packets dropped */ +static void +print_stats(void) +{ + uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; + unsigned portid; + + total_packets_dropped = 0; + total_packets_tx = 0; + total_packets_rx = 0; + + const char clr[] = { 27, '[', '2', 'J', '\0' }; + const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' }; + + /* Clear screen and move to top left */ + printf("%s%s", clr, topLeft); + + printf("\nPort statistics ===================================="); + + for (portid = 0; portid < L2FWD_MAX_PORTS; portid++) { + /* skip ports that are not enabled */ + if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) + continue; + + printf("\nStatistics for port %u ------------------------------" + "\nPackets sent: %24"PRIu64 + "\nPackets received: %20"PRIu64 + "\nPackets dropped: %21"PRIu64, + portid, + port_statistics[portid].tx, + port_statistics[portid].rx, + port_statistics[portid].dropped); + + total_packets_dropped += port_statistics[portid].dropped; + total_packets_tx += port_statistics[portid].tx; + total_packets_rx 
+= port_statistics[portid].rx; + } + printf("\nAggregate statistics ===============================" + "\nTotal packets sent:            %18"PRIu64 + "\nTotal packets received:        %14"PRIu64 + "\nTotal packets dropped:         %15"PRIu64, + total_packets_tx, + total_packets_rx, + total_packets_dropped); + printf("\n====================================================\n"); +} + +/* Send the packet on an output interface */ +static int +l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port) +{ + struct rte_mbuf **m_table; + unsigned ret; + unsigned queueid; + + queueid = (uint16_t) qconf->tx_queue_id; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n); + + port_statistics[port].tx += ret; + if (unlikely(ret < n)) { + port_statistics[port].dropped += (n - ret); + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < n); + } + + return 0; +} + +/* Send the packet on an output interface */ +static int +l2fwd_send_packet(struct rte_mbuf *m, uint8_t port) +{ + unsigned lcore_id, len; + struct lcore_queue_conf *qconf; + + lcore_id = rte_lcore_id(); + + qconf = &lcore_queue_conf[lcore_id]; + len = qconf->tx_mbufs[port].len; + qconf->tx_mbufs[port].m_table[len] = m; + len++; + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + l2fwd_send_burst(qconf, MAX_PKT_BURST, port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; + return 0; +} + +static void +l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) +{ + struct ether_hdr *eth; + void *tmp; + unsigned dst_port; + + dst_port = l2fwd_dst_ports[portid]; + eth = rte_pktmbuf_mtod(m, struct ether_hdr *); + + /* 00:09:c0:00:00:xx */ + tmp = &eth->d_addr.addr_bytes[0]; + *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24); + + /* src addr */ + ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr); + + l2fwd_send_packet(m, (uint8_t) dst_port); +} + +/* main processing loop */ +static void +l2fwd_main_loop(void) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mbuf *m; + unsigned lcore_id; + unsigned int nb_ports; + uint64_t prev_tsc = 0; + uint64_t diff_tsc, cur_tsc, timer_tsc; + unsigned i, j, portid, nb_rx; + struct lcore_queue_conf *qconf; + + timer_tsc = 0; + + lcore_id = rte_lcore_id(); + qconf = &lcore_queue_conf[lcore_id]; + + nb_ports = rte_eth_dev_count(); + if (nb_ports > L2FWD_MAX_PORTS) + nb_ports = L2FWD_MAX_PORTS; + + if (qconf->n_rx_queue == 0) { + RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id); + while(1); + } + + RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id, + portid); + } + + while (1) { + + cur_tsc = rte_rdtsc(); + + /* + * TX burst queue drain + */ + diff_tsc = cur_tsc - prev_tsc; + if (unlikely(diff_tsc > BURST_TX_DRAIN)) { + + for (portid = 0; portid < nb_ports; portid++) { + if (qconf->tx_mbufs[portid].len == 0) + continue; + l2fwd_send_burst(&lcore_queue_conf[lcore_id], + qconf->tx_mbufs[portid].len, + (uint8_t) portid); + qconf->tx_mbufs[portid].len = 0; + } + + /* if timer is enabled */ + if (timer_period > 0) { + + /* advance the timer */ + timer_tsc += diff_tsc; + + /* if timer has reached its timeout */ + if (unlikely(timer_tsc >= (uint64_t) timer_period)) { + + /* do this only on master core */ + if (lcore_id == rte_get_master_lcore()) { + print_stats(); + /* reset the timer */ + timer_tsc = 0; + } + } + } + 
+ prev_tsc = cur_tsc; + } + /* + * Read packet from RX queues + */ + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, + pkts_burst, MAX_PKT_BURST); + + port_statistics[portid].rx += nb_rx; + + for (j = 0; j < nb_rx; j++) { + m = pkts_burst[j]; + rte_prefetch0(rte_pktmbuf_mtod(m, void *)); + l2fwd_simple_forward(m, portid); + } + } + } +} + +static int +l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy) +{ + l2fwd_main_loop(); + return 0; +} + +/* display usage */ +static void +l2fwd_usage(const char *prgname) +{ + printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " -q NQ: number of queue (=ports) per lcore (default is 1)\n" + " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n", + prgname); +} + +static int +l2fwd_parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + if (pm == 0) + return -1; + + return pm; +} + +static unsigned int +l2fwd_parse_nqueue(const char *q_arg) +{ + char *end = NULL; + unsigned long n; + + /* parse hexadecimal string */ + n = strtoul(q_arg, &end, 10); + if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) + return 0; + if (n == 0) + return 0; + if (n >= MAX_RX_QUEUE_PER_LCORE) + return 0; + + return n; +} + +static int +l2fwd_parse_timer_period(const char *q_arg) +{ + char *end = NULL; + int n; + + /* parse number string */ + n = strtol(q_arg, &end, 10); + if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + if (n >= MAX_TIMER_PERIOD) + return -1; + + return n; +} + +/* Parse the argument given in the command line of the application */ +static int +l2fwd_parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + char *prgname = argv[0]; + static struct option lgopts[] = { + {NULL, 0, 0, 0} + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:q:T:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* portmask */ + case 'p': + l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg); + if (l2fwd_enabled_port_mask == 0) { + printf("invalid portmask\n"); + l2fwd_usage(prgname); + return -1; + } + break; + + /* nqueue */ + case 'q': + l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg); + if (l2fwd_rx_queue_per_lcore == 0) { + printf("invalid queue number\n"); + l2fwd_usage(prgname); + return -1; + } + break; + + /* timer period */ + case 'T': + timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND; + if (timer_period < 0) { + printf("invalid timer period\n"); + l2fwd_usage(prgname); + return -1; + } + break; + + /* long options */ + case 0: + l2fwd_usage(prgname); + return -1; + + default: + l2fwd_usage(prgname); + return -1; + } + } + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + return ret; +} + +int +MAIN(int argc, char **argv) +{ + struct lcore_queue_conf *qconf; + int ret; + unsigned int nb_ports; + unsigned portid; + unsigned lcore_id, rx_lcore_id; + unsigned last_port; + unsigned nb_ports_in_mask = 0; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); + argc -= ret; + argv += ret; + + /* parse application arguments (after the EAL ones) */ + ret = 
l2fwd_parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid L2FWD-VF parameters\n"); + + /* create the mbuf pool */ + l2fwd_pktmbuf_pool = + rte_mempool_create("mbuf_pool", NB_MBUF, + MBUF_SIZE, 32, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + SOCKET0, 0); + if (l2fwd_pktmbuf_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n"); + + /* init driver(s) */ +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n"); + + if (rte_ixgbevf_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init ixgbevf pmd\n"); +#endif + + if (rte_eal_pci_probe() < 0) + rte_exit(EXIT_FAILURE, "Cannot probe PCI\n"); + + nb_ports = rte_eth_dev_count(); + if (nb_ports == 0) + rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n"); + + if (nb_ports > L2FWD_MAX_PORTS) + nb_ports = L2FWD_MAX_PORTS; + + /* reset l2fwd_dst_ports */ + for (portid = 0; portid < L2FWD_MAX_PORTS; portid++) + l2fwd_dst_ports[portid] = 0; + last_port = 0; + + rx_lcore_id = 0; + qconf = &lcore_queue_conf[rx_lcore_id]; + + /* + * Initialize the lcore/port-rx-queue configuration of each lcore. + * NOTE: Each logical core sends packets out to all port-tx-queues + */ + for (portid = 0; portid < nb_ports; portid++) { + + /* skip ports that are not enabled */ + if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) + continue; + + if (nb_ports_in_mask % 2) { + l2fwd_dst_ports[portid] = last_port; + l2fwd_dst_ports[last_port] = portid; + } + else + last_port = portid; + + nb_ports_in_mask++; + + while (rte_lcore_is_enabled(rx_lcore_id) == 0 || + lcore_queue_conf[rx_lcore_id].n_rx_queue == + l2fwd_rx_queue_per_lcore) { + + rx_lcore_id++; + if (rx_lcore_id >= RTE_MAX_LCORE) + rte_exit(EXIT_FAILURE, "Not enough cores\n"); + } + + qconf = &lcore_queue_conf[rx_lcore_id]; + qconf->tx_queue_id = 0; + qconf->rx_queue_list[qconf->n_rx_queue] = portid; + qconf->n_rx_queue++; + + printf("Lcore %u: RX port %u\n", rx_lcore_id, portid); + + } + + /* Initialise each port */ + for (portid = 0; portid < nb_ports; portid++) { + + /* skip ports that are not enabled */ + if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) { + printf("Skipping disabled port %u\n", portid); + continue; + } + + /* init port */ + printf("Initializing port %u... 
", portid); + fflush(stdout); + ret = rte_eth_dev_configure((uint8_t) portid, 1, 1, &port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n", + ret, portid); + + rte_eth_macaddr_get((uint8_t) portid, + &l2fwd_ports_eth_addr[portid]); + + /* init one RX queue */ + fflush(stdout); + ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd, + SOCKET0, &rx_conf, + l2fwd_pktmbuf_pool); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%u\n", + ret, portid); + + /* init one TX queue */ + fflush(stdout); + ret = rte_eth_tx_queue_setup((uint8_t) portid, 0, nb_txd, + SOCKET0, &tx_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%u\n", + ret, portid); + + /* Start device */ + ret = rte_eth_dev_start((uint8_t) portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%u\n", + ret, portid); + + printf("done: "); + fflush(stdout); + + printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n", + portid, + l2fwd_ports_eth_addr[portid].addr_bytes[0], + l2fwd_ports_eth_addr[portid].addr_bytes[1], + l2fwd_ports_eth_addr[portid].addr_bytes[2], + l2fwd_ports_eth_addr[portid].addr_bytes[3], + l2fwd_ports_eth_addr[portid].addr_bytes[4], + l2fwd_ports_eth_addr[portid].addr_bytes[5]); + + /* initialize port stats */ + memset(&port_statistics, 0, sizeof(port_statistics)); + } + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/examples/l2fwd-vf/main.h b/examples/l2fwd-vf/main.h new file mode 100644 index 0000000000..6027cb5455 --- /dev/null +++ b/examples/l2fwd-vf/main.h @@ -0,0 +1,47 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/l2fwd/482250_L2Forwarding_Sample_App_Guide_Rev1.1.pdf b/examples/l2fwd/482250_L2Forwarding_Sample_App_Guide_Rev1.1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..0d83075571f05439954d69a4c43394e355df958c GIT binary patch literal 80691 zcma&NV{m0_y9FAX9ot^%tk|~Ev2Asnj&0kvZ71EaZQDu5xc!}bzVDn{wfDKZX4R@$ zbNw1)KI3_xG2VAlD2RyBGk#@5q}bh?nnQ$RX5%CV65APCBJ%Jsh*?-WnK&?rSsOT+ zh?p4J8Jqlh;bf2_W@KbgcDFZSkTUi1LoX?=PaE_p3ROw#foq0-csEUqXdQ#&9)h*1FNE@b)qCS4JU?jU@bV z{ZHoXj0;CLMJeXVL?#&y^j-)MsfcpHlhk>QAH|4L6KUJnd5!u)xCry{uxEEdj3aG> zK|isf&nGfYsUOP3#e*pLuC5A9<|o&xb%QO&rI3~5*(&xj?wjRE4zgOBaw$hATXo1; zY0}YaZWr%}7i)INST>F1FK!-dyU9lc`{t5XLm?|qP}@Psi|7Go=FjI{>2p&9 zRNeRsqt@sJeG=iU_S9mvg~{ zlH3hA-l7HNFh(Suz<^B~QJP(a8yUsv3%~Ukh$3S~S-C~_e^^FRQo^I=R$pJsgrS;U zbbB>^y5hRj&4GT$KD>hL3|aI2wWbZa$%AnXIUyhTo(;6xu2@=MhyMt<;hXoQJU!?U zcz<{zxQgM+g^ZW8vBtBm=}KJvn1(&Mlx+5A(gROJeqjrJ_SJU-=@Vun`@plkie|Ny zH-u}51@o2?dCs_(_5}rLpbKn5gfp=<{%>{tx%-z+nc0E=K(TE97AzAZ+rI?+cX~CB zQIsAKKneUD?Pd*WT#?3=Dnb_$7Hw2eKrLi2vz-|jyQphh_F33ER`j~F(NDEAwd?{5 zoG7*~xgSwl4-p}B0mKcF{}N2%&Z{_;$j z$^-Nz#-RrKs~Z@D){zj`QDc}FGcJ1xasUF`qH{5QLBK&sXStaE5*X7z0LIAlZ(vOS z1&rzMfB~YVtpWgjkSA^6HS?qGb_9`5mFe zU~9!8g(KnOmnKQsYcLlrb*joF#V^rdU|=~zKfH+cF!%pD_UScg`OO#0XfYt2D%9x6 zuaA_1F=vsZFZ<)prJ$=Q+o7R_iB-imUQK57RPMsR1cIaFOYH}ACSB^+eFBa_pn4%D zQVqAgTft;1KPVIkrJPLAUy{fC56ENT_&0eRjKDut!S;9LHLBRyt+AneK)xcsEgL!# zo$7#vgGA%~vPfu*YA94sMs6HZ*SyD!*WG)6A+#uJPRP{)M*!=aySaQ?k05l{?dQ1b zOccej!HM9!;45=8g^Y1m#Eji$W;n>~@`{MbS&k}gB;N^iPen>olny(A-Lq?rxl^I~ zZf}QFB8DB-tfh5z5qG9AWn!d$ZiXFZOFAZMppO2EjUgmkAJ(#RBV33!qQ31%&i%vv*m7MQi9r%@4L4fQR{cUr3V84vw zo784%Qy~CszEr=vxwTIX;X6zSe0%3F*$!oDRUHyMfMR|h7=e2er|H-JDweOSHv?;2 zyRet`;Zw(f@lnk7vo8wf!X)?fChALIKEh+|PAebJ@U&&DKc5I(#}GQvnZk|(WkGIHaq`Hg~3ejJFR8dz+=M9$W09y zt+FH*5sqTgok2q>`CclnhP?Vd=T1MIEmvhjQ10ktBb?rgAk6!+>`3?YY&uW0*Yx;W zr!{6PAj6On#P3-7c6@gWjL3Q`LkR@z_6L; z5p2OU{l|V-z*E@4q&sdaZ`hKHdfsJmvfQyN+JNWwPERnRwfDG^Zr|%C!@-h5Q9cDgCB3k!T z?AH5tm1YHiO~V2p+%j>lgS&yuCmEqTFG*N?rE1N~ihI{iq4=Ef?{5W#X;>UKDzZM% z747F5A$J6eq;lscw0m~`*BYa%1e2Eu9Wpbb8IuIpT&*V=d%Z_6q zgZ0m|N$*#MF(y>I>VOg^$U{CaL?S7W=+Ed6K@6uUMqI%lDIjM7>em^A z0u!1=4I=>vK$7(1fvBNXfYzs1QBlwhhjDdXIQn?&;=WSE*vo6b+Pn?PVhxlaeE)S? z#(n4|L3yGdDW)RM!a{xfa-qQ5qj#ILRQ>X>ci&(cvRX1%Hs#y}QKdO??TS|ie+gz5 zQ!y-}+ppau9ZXp(Z`Jz%Wu290*yu<((rj$VhR-gHwLVO^p$__!d}fl>2#6g)fG%^3 zS5O5n_RW;erB7$OfTUvB@2H$JPLL*;IL(+{Oe=v z@YvEO|1f#qad0O_`?lD__D?SRM%!u)DMAN(t5%*Z?o^vJKFkio7sGbOm2Z;hrkdH* znfpIsLxx~u#tX@HCqNS48wmM%WHtIE?NrU{`n2KV_Co`V^5U6-fQD$)7eyu zc|=xO>pBwF`NYr%Nu;!x{L8{({U?R>pM{?Z_^%fKx9cNIREO;V5JA)@$fzUq3&j{4 z3e51BE*C@``+)GSCG{6|zX7n5t(33p7uwkoW8nhACV7<0*7hk!Q!I~~wm*7>f81(G zkDBYGKz8kMMy;=fCsNvr=1Cz>YEtxhhCU-B^~!MHb~F&y++b)Xnqi;a$(`2X>XqxXqhOwQPIf zsLMCYRO&xZ?RqkboP}A6p7bLzgykl;>I)*MO{P5|z!pN2_*Upu@n?qNN180u0 z-ohM@8PZP9j)Yt+hfF1#YKWin(8wgiOL(qvG1`(? 
zbtr0*Gq@-lr`g+r4ju8$<4F^wa|x3s}zPY|HKul4@@ zO4{YKQh;_mt%oknbq|9b%N*0lEC?By{%0o`0toRU#m8Tw;`j%snEqonSlIqm6D)rR zOIu0DY7dC=M)$g`-B!tMhMY&b*OD*>=dK$o;4)F$Kog z9~(LoLNu`OeH8XmG(ag8oRv=7Ob|8-#2_q>@5YZtpZE3N>WED?!+mSTd{wLE-vuOh zHcXJiBDb-I6Y=f3BE*FQ^De#EW49ccXhiW1ZN#z2Ke|K{oSgo5-6AqXSY zvls75hOlHY((kd$yvuuB77N;4lL+_4GI2InAwN!nseqHAC>7I!)K8%Z>Y%7tOi9y{ z&;_NkkqO^DYY38vEi3YyMXf=a93nF=;+UJ-)Z|*jZ%lP2uYjIE)p8L!cK?^v@=vm) z|Iq*w%b#NTTW9)514x0yD55n??7g7=>Dag!T4YY9@J7dMhE8xXJp)i3?034}1$q9} zXkIVHV>xStci-_8Mr$=HG4_NLN}}r1cpP26d1?^wv@o%Bk166pM=SFf$Kn=4g0a;i=${oh0 z=s;=hnIXb8{11eMB7H3ltkEEi(1fY+%-h2lmGuM^y&dc=B^$kcBQm@tEJYz}yd|AmY7|HAeEd1a)j*y$TXi!fPrz~D&$URM9!m42-U^VPu`67I3b z&8P=ozukrs=OkvL<%-N?*p#SnZc;c7SUuwqP+EYc>GoL~-)&#d^%a|<1=&ggm+r1z zHHkjg`t+yUK0SH*(9;8HaRlbk$DeXs+oJwUZ^!XZjPd_qW90b%!{$qj0Mau{q#tB1 zgAf5%fy%b0kC1^{K;YRuwC!@Cvxcr9uonLH^2Xt)J&0u;Lqel2tr2rTcyUH*L_Ng% z)HqdIpwiJ~R}de2U+2#KO4`;(l^DbH@Ji~i<0d=~hv7gTKhpXOeCR<$sT4 zmANZLjs!lD^d&qe)5+tmLnAC`BcfA1*|=gm;PN0gkQ}C@WPxjrcPjZ{hR*%G%T>&2 zaSfz-1*(_0bBa)!i4W%8Aa256N_tmCSFXMwr)L?D&4^-bIq+RSGpm85kVMRD9N~t( zU?p;6E}c`WmPX6~+00Hu?%k0mMP|p#4+GMpchW!V#`&i$@D=tJotvug0-*pnxe_|< zaGvIDNv=a)7F~q0nAT_*HoTCzw-)+4;yhcZeQ37ch|>2xp7)E3{$g*nQL&Wk;-ytC zc}F+}`eUwnI3{|`2!XrPCXV*^flJyJde?1y&5|$__f{Pa=pt}xyqyu6uT#{zkTNii z(k!|-4nfaA-F{z{#+O*}b4rB^stBh&*ulR|4Q0@j zl1rx@)1u=Et}R-pS6MLYw!cS*OX4v`&EI+=#K(bPHKKAb17qUP`=nY#iCzn+vjF`_(?Tb{*w;p@^Ae zBi@YgT-I21V7A`Hw8?t<`-_zAaO*^s0)OrlV~YrGh%`-!@>dtoActAC<}tQ4vi1+6 zW9rm$?MyD;%~lua`#lttMU&_B!g#rIyWVH|UJ!nBQGvfy-#;;_?Ck#@^O*nNp7OT~ zL1ki}<)5%k#K+{LSF@^#P7tP%6HDrteji2i+mAZoU7^=UmCk(V6ih`YTtK0FLEX|h z0X|G&B;h$scrKVjY^s3&F?M3?^1y57CcGy|9LE#Ptz_@BXO6rz+lDRfkwIn+^kfFX zgZ`B0YkpoS`a*u4dHYG-nojZE`~COU1GL-e{g{n`>syzXx)-;0W7Ft5b=TJ&t}eao z5W3W{QI?!T&}xqt9%T2|v)YZrTGu^}gVH`M!;$ENHc8)JR!=KRb;iCQu&UCvs{KlE zgK4p0tN_*8)QJDN(j)$T1*=fxX;9GmsN5n(UifN4l3R*LV%YIk_i)Xcf|9D~xTn2~ z4R%KlUxcz25ZEB(S5s-P9)vox%U^_+g%5vzMva80uGb7Q{ArF!ne{|}`Fj3|)#Kp& zPtRupvixgcW&C6L{$@XGJWa=rpanJfb5z&TE}D}(gh&7-S0RqLR1;f_Asx331wRkt zlv;3q|M4-6x8o?b(PaUbT48`_z;$?sn+V^*I2D;*+bBpu)OuKp@X54TRR0MYn333U1=ReEd>d*- z6c!S=+JSd~K$Ud%puJ0wn-T zLbBh1rWt=s1t>vmIHc28q|WSNpp#|fjtS9EStyug6RtVr5Jv{KlTryYqDlb~AvT%t zLmF*HEyvnHv~=9S2(eHTE;+!YR-aRS6E;~af}|7U2(=a;9tu8rp;JE>7qPEi(_y5G zG9)YfKm@nQkW)|_SiVwPUuRmNS(j-u7k&__Ql|bDQ`Zg6E+g{5aD;+U)3tb=#0U&}WeG$A#e^_>8no_uVrMO+g#_OpV(ZIl z@l%iMvW0*UV#FA>o*a!OLj{yU&leo98Y9 z0roB}x$WmI%fYTlroY$hR0JdNnZt;g2Ihd z9SHXG(hEbD8J*872A=2Zj;2AJ&#f4>2wpWw;xr%Lz6!Jj44Nh((TX*hFtCKK93}G( z=B493>;{x7rTT18M#?kBbB4VXbVK!>e-B{1F@YYG5V4K*n^ zr!ve6;v5dSDg>#*A{zJj8A6BA&m-Nk(fvjd{7Ub!gRF3pYu%vvu#Og6O7C%y2rdaN zI;*Mmpb5BUy+^Odpmd%)h|DZmDH=37?IpGSnuoRhQ3 zC@I2>5}KiF`pH#+U?H{ST;bLj=!heRj;fdK#0wMk3o#_*#1r*NQ#g`rRIV`OExQ`1 zWXjxuhC0$m+?-YKL%7eHJZjUECz^a%KrT2B`dm75Gei13_|h>M0)Z(Ut+8V`O)3re z6F9Lw4JZ`ocTE{A;ED_m&!}IN@inzLTYMiGeC5V0Z|botlUpJujf=;le$mm$wjwH5 z5&|Ix?Y!+1QsvlqUx6i?*A*%iqEz=IHDJGsNRaK;0K$Tw4~dv!zbl&9_JzeiaPkPX zP{InhjQfX zaU6y)K5sodUo;^GTX=zZP}J_}Oj}J`Ic7VyM=kDiDZK=AeMczb{Ly6|OdYS)n0>EwIKr z2#R2mJPj>koVEfj@@B}4oZGZG?J=lENZ=#=onytQOgS5`-b5WMQhWP3c^f{MTg#gP z`=R;XR-MV?dvEA-@;bb!M_b9U4I^#FmOE?z|Qo=Sb89M+Y?FRzLlQGLGinpkG#zHkl~-9eIiAi&3#1s zR?5F{H@F|ViAyX}z2&@@WgqwQWoEAPzI|NZps$~7*vZSs$bWKpy&MgkAosl8-rNkl zm;B1fG1bs}9;5A65aQ(Y(LI)c~XVvH9$$MpcVypY}6~gTH{(kLO zOwO4bh79`3m=vaifab?#)Ltwd49ma)T9r<}%rk zBL0G!orTP+RMFVERn=TjpH9M5b( z_5P@&zb`Q2_h(SM4U%FCkwx?A9rLlnBGVQl_vxqjk-uw=^(SuEQhbVD!zZoZ{c7OL zXy9GR^bEv%Ojd4lz>Aj&oJGug8%783rK+~4+Pihn=O5oY$^%p|2h$&&PjYom1jXT?*}Bf>dGz2D-b+2M#+N_Olv^sAsrtGf zil_wDq{Z8Le=_i7>>sZ<-4DkOi2vL+cX*bl%DcRe!pgR~Ic~y?A+);LuELDTw!ERH 
zzq!s^#2AEs`TFs&m#m{iqk}Zm1BnfisNS{mm(#^R2}s%hL}34YR)(GZUr92@-&-#= zXqg25BtAT1C*l6X!wZ@9sT0o%C@CfaM!72}oOW7!9XBWyBedX405ON&`@< z!q~+ys7jn~^zloBXUMqU|C0#_1TK4Qf=}hAsXo1rn9)a9i&8?Z7@- zXiLHga~wB#BDcVZT$*hrxeZLLU(09_Mw8#5Dx92v$Gy`OQ&aMK-q_=xFZaLKTsg$R z=AM7QUR)fFjlnvpzu>BzV*+Qq5(gCyL9dsaWQOXzKzQ87=GllqkvC}<3=*IPZgmTk zx6CRO@SDMYTUJL)7Ym3hQ&-Pvp+6pTM|J?$vJpbM2Y&9gx@NNfaME@=0~@NcKXtuF zHue6znL0Xus<=z?HsRtC3_IHIui50wYhebtyN8Rix}W^n za@|$s+z_l|L*yj%X%z9(C*^?A6igkDczXu)4&v z!@D=cveUUYB)4FcSU~f_U0VmM;+GQeKw+e zGDR4FEN@IVSva>3d2I0zzM@=buFDNyz#ZK6ZAoLrOywErS_{{MC$!vN{xTpV8WB(*ZXJr1j#2A6> ze9>q%xeESrNR6S$oyY{n-oCK`A>v46C(wXa2y$zmX`w1EIvw)%R+{?lAl1Q zP$ns37M_NHzzPG)kd9jl5o|21Z2$ZC+_=`5!`^Vx#u=T1SuaUmDEl@+gmKdG%oypxX?U21 zf~SPjH*&l}O|sA6ceI8yyG+N+F4*|fQyc~=uJ#}ZrmNSi^JU&FuNSWHjk!wzLqVm3 zsE%0H;LuYGAiv#V;?4T|!+O3FQ{UD&p41@uC@l+M?vVqz>wxR@N|<=hOTe zhHbm!6K09YX?`Q1epXaeS0!vsc8Q#X1h%Jj+4))?;Yn0^K8S9QwET8eR&Re`Y(+Fv z)IzB>0^4U@R7qSCDb^&rBLe-3X&O`9S-b6IMKoQ+La`0o_-vClMkjt3#)V2W@mJ75 zPI=d-Iv>Depwk)r@;-tUMb2S{#xkf9K` zzT+amxrU<#biBFD`z4gCF%3A$qO>sw>MF2l4jLUO!f#{@vIJ=oL7tyr05h6c1Mb$b zS?b`Q!&=&-Z~!n{gavX6h?H%>{%qN|6mmsKC*N3O(!0*^Hk#m<+{R9nWb2KvTJ0=Q zAFysCe8>@tD4nk-B3W=8@~vt8FrNVYxV;h>Iv6BP0CUnFY&WUx4F*oHYkv9H7(EchY}jsWa~g!} zmADex5jmjha$+n43{Hl`cbTjscf)G8eKH`Qx-o2?=>d&wmNH6djY3M09*C`IVsjxN z_bUn>#{Dbu7=mv3qQVl98l&oo!PyB@1d}tV#!LXQTV}=edKt5u&jC>hl!6)|59BSG z=-_vx3{fZl3-pz^Lz*cVV?X{}e}e^dJt4ihp?g38y^42Qg@gHzP!0PzAyAnq;zjlS zg)-`JCGB0(Upd$%mdeQ9Hf)4QYm z@_raB$NAArtsvI}g1>Qo=Fk>t6No^f_PaTl=Ld@z;>J|qYpY^kt~7 zv_vfNw6|7L1WY{?oQCg6$QPihjvhoXPHszmY*a|}-WaT(SI!ka7f_5sYPBn%EU4cB za=ps`3y9}%l>#hhLYs$0JZ3+mu0oa;(N@9?Scb3^a;kLj@*jkUP1O#wI=uyn15LNsC!_$I#f1oeOVU zZPUPqhphwSxw2`22D+nQLtb2}qywnA9%j)F|(@O+qu-DM^WrLxyt}8icjr$V8IXjH&JvfkeQ)pE&{-+ro6} z8{A?xXriiU@IIqh^`vN0`>^?L^YDm%b|{+#B+V$^a6ZZcvy7-o$O+oiu8}%b7*00d z0hngah}Q~@Y)RGP5EQmcQaqMUuI`}5;A`q)08jPlqM4$1Qj^~9?sX>l$xJo5JhI;C z?scVe%$7=AO+H$M=H0w%IHW@zIxd+0Tlp8+v;B6jt{x;NP6(DQ@iyJJe z)^`QA%EM~9oMw_;i}S6oB?<2iWD-mLWc*T>BL3ssoqK#V!EJtqJiTuM_Sdx;?3ZU} z4s2L18m1VL9_`$cygt<0K|Pnv8r8(pJob`qe(l`HgJHXOAs7a7C|Rngf<_N*#MLLcDgZ9)!-FA+>rIQ&{Vg<3PaD?5Jm z-U*M=+>-hN?CC4?;}*63eP-YGI&8lZYCM9%OxJUD;fST15wswjTWkBS9R9`18Fbj8 z4x}4K`0owaBuRTZrOALkuh8VSTk_ZgCi};MnsrR*lzxN|^c6u@YQxBw*>zVQ2jSpl za**f5z9kRCDgoe5KNJnU*co37hSaCoC5R~&EpQtW*lK7( z7wmwbR7$}J>^I}r&@b3mKaGpny6n@x5UCqfVyZIPh3M2hNl_$$1J1jV9n}Kz9;cNY zaVLoSrACt8_xuQ2F&l7B!Y*ZrY#ru^qA5@j?d-%Tl_++p&$)xW`E&~(_&H7wT%>nU0XFS<6HQG`G4ci?FE-Ne`7?x)}8dLP2o4*a&tx>X>KvNg_c zy zJXkuQt0m4u^Ea}o&@P!`dQjP|ANI!D%Ne#Xtpiqd6uvHEaG;^cDtvtNk;wH`2s|N| z0YI*Xu!QA-&(si6$aIb!h$;N55emG7yPK{7U4+!1 zI@CR~3b$=k;?Rg_?9YcEw^JIJ)8N?lp{E|DVEH!43uN8F${!TwqVwO|-?RH*G==5% z9V!jZ98j2qH<>SklhoOl)YM*DN*IVlUMhdb9SgW2@w2$Kv@-!4xq|TsU#e%O+()tf zyLCx3jNfoAaMf%XmVY$ua6A-6&{Up%2*xqq>X{p;`E_g{@dl$~S-Ds7OMT~e#3vObn1#4ANdcEHPH+U+A!@0fdIy&?tugZyuD~tWZjqaHG z1`zJ#4KO-1LLrX_Z$QE$Z^5A}Xd1<+vP^e(lIH_S3 zRGBnY6=V=PAMKBE5PX8m`Z$?^!n-xw zLeiFU=j*wwuwkT)Nb9v+8}&+d`$tW;<)4?sp{+N*{CXA)T(tot0l3oc<>uqNeTm^P z1_IDqtf$c*y}Oa3iO1p%a!V*k;=*gTZ*6PPu=qxFWO@CO%phmq^c7ciq&SJ|j%WdB zH{z0E*)Wh-Jolf!wSOiYm5|mkHJf?5;!Ng3A%DVyhuOAuw7AZ8o6Z^>km73wHoJW~ zygsgB_MTeO^>pOCf77xH_>c(~B*0uptn6G+dQr7HOeo$dBzbB`4$vHG?$y>Je}wLS zTLd_q$d&6;Ux#FYwwvMTODx;sfowhtH?|-vg;{b@=XY{QL5IxLMX8FP>LZizwz!6k zgE7c1NK^1_z!{n%R;(+sL(8BX<^(}31l0!iqBDSi2usl7Q*3<`rx~u;=ADYTolY*8L!gVT4qlhFWOklm0KSN zyy10VQZH-2{LFY(Ygpe|>Z6%nq*{Mc^5>(1d+(E=f2;7~aa{tFPztkOn>5mYa~EGV za6SZg&lG-^F#K?Ojg#ZvFAzEV;J*e?_h*#P*xsCsuC;^qT#TV7bswc!7olf7(DS7` zeq!RF-RNL1(f@ipM`SjV-m`InxedW2x%ry0_`G8)syX03d}>q#qf0p(*7>?H1?FJu 
z!tRw_xlo;b$X(IKZrjpy^^KqxfW^j;V1;PnO!mBnbmc_E6_lp3xn{~NOi)d124%}` zQy9vKqNTdQA8aO@bmbC;Jg2fQS;^m~5IIrp}NOoVkV&pfZkMx#S^x%*z(GN;K|8c2BWw~0B& z7J-^(H{Q5pTObwcYE;~ipmFCXtc<27OBfB5r4VQQJ(aJ^)U9Q`CTMT~?_lCdS#Bob z+A+0Gjn6jYfuh+dv*9HJUPjyMifxC+RdURalm>13h2_Vq%Zs!*kY@UhsA(f;?}fOM zDqM`OLwT#L@aXp?DL5C~zqtFI8gwhi!%JQ_pkMW>OEOZbT0XmsFN>ZhXNlI<^o}is z>?XjkFiv%(@2aW3mxwmF=gsQ=E+R~Q4W#=h)}uV5^5{x<^dQKg^SgI*yr<<2<@n7G z{w5fI&F&Lc35{JroU@Pv=vc1IgTch3&q>FM0D6al4=9kiH8ab4B$EYXH9d5wf!{x| zzw#|Gc;tYir#AS>8oa?4vNwyt)b{DV0*hwkI!F6^D=!XoW2LPz4qs@a$WN6o5^FHT z3{R{BYAY4Jt}vx_x9==j63!08rS_a|u1L&s?rLrAASi7yG0VwmooPC9?pr;yE)VKo zG*V36&5!qx36w8iP4JM;b5hcmUD#%rRT7KC`z!J-RTLr4h^Q{dV?1g3ZH^*uHB6XY zwUR8!Xuu_vx8$9BK{IkrTq&Ouaa8Ct`)2(wG!uy1jGv|@kc+0+=F}!aVd!5APKj?8 z`kcrfx>OTAU<(q8=xrvIn{jTA7Yw7_zH&`Gy&enG-u2jBxBPU{kR+Lyp_uHPc^Uq& zCMRAqG`l%96AX*7v(o)Bwm<8`otbiW6&6kZm>|5!pwh`o6-&S8^y13iWN%QS>*{#Y z;CwsG^%IT4cwIE47=j8UmTM0G09zX_2??GNQQg8S?bMAQ(?HJ=?y@PWX z+Dw@|;?(9{KS5QGLD+ozYm?wrTC}-xb~pyi$NB!X)|67`cc$r5-0T<@G}kOZ&aN`% z<@ctkRg)e_;T1

Lfhmd$_{SZ){I^~jYUvwj#C%GBl&otPSuPcFTPy2SvEm?>58 zV^y!gxzouP&rS!dViwpHg{wx@@3vbroMR>QsX;PLfv#6&DFvjM=#<5H25FGZbtwxq ziC?RqzP)RgqtUO|2U|WZE?Y+{uM~8_ODLL*cLB}h{oHtcwV13c`gNH1;~tpmL#@hu znB@TpURpZhju0mrE)4~wts$#H;g>cC0+%NXnGw+d?L@)|S=@aV^4gy5DNI9FL|qj9 z0y{))$0DdtBk>GWu_`6g1SA0)A{E>l_=IBw4!&|LlTxL4T!ONc5or_XU4l^B7@#G+ zU7kH~Rp<)7z>gz40rdkeESyW~K;lwyCqrs`O&YYT0G3d`e6#E|Cl+V-_m$DL;~9pk zX!T{|dlJLnaTax!&{Z=Qq9A2~5$y#j+j@_XZI?`vM@TUi?Uu8D!pT0TAFnn~J2_p_ z>vPl#?0FH}^ki6}ullvGX4PQ8S_B6URtKBjEo8-+hpi-X66xvs z!^^w!lXOwI%$xHf4^(Wo^Si)rV%KjLA0by*x!$YO-TNBRopCV3duEW!fl#f^pq{;{ zW^0d1Y$nq)=<2%52T_p|atyO10K-Jp&oT-y*tDXeoSpYZ+}%*n47=NPg&?4^ds-WG z)qxsPLPCaJP~`h8E}g7)J&h2AcH`hYn9H@^u!4xvwhnUxF-KBL08Iq_Msmb=iXg%k zo=O==mmY}wAmFHjr)xbXWR!;Q67FK_lY-GKAqsi>^A15s0B~a|RE-Wrp}R^{0xnrV zGDkx7N!U>aG@X$$K!J-yqPb@BP8`B=O68B(97@)H>9uMEhhdxrMql)fxFMO+?;%J+ z+j17_9Q@KRGda35wPB<~*Q+dj1O(gF7t&ERFzanm!5w?7U{rEA@ujqauo<-m_bB#$ zg<|LAzJ@DJb@g~1Mxji;PbI_+K{ai^y%8H9C=CGq=`TJ$025~0utRs+S=H;rubAyV z7>?lE9t6WZhSUba&-Tq$-?Ojgx$9nOk3P_0o>sX1fdaAGIwPw6LxNq=zD-YucWqmP z6%~lHr`7L`N??Ums3%e8BKqAY7HJ6u5@{G$Ypc(lAJZ1tOZrb##X%Pn;ssZ1FC3p^ zdQYLFk^MUH?vvec8qhj6H5ieA>g$UltH4+*%MD(q`>%&U5|aYOV}WpJbQ%kr?1?9+ zM02(nR&BbUD4@E*S0hbzMHu8k+Allj+>_N*Un@_r8ENqP-PJtI6;Q_F19af zC~Rggg@O)6bH$z@ua=qK>WG;c1;-J?Fl zxHL+6fJ+0evDA&lTvM~nVYC)btp*KHj+0vr@tG7UvoQV?lz}i?(zxdZGd1C)gJ}+@$FXMPG%ah8Rbt*|y6N2nm3j7-Ux;IWUz*Y{QGe>5vAW z5Icwe!G$XSGeO`zbwg4C$J<#rJda#6(0>-kI6()V7(oPMOkh+cQO}YTIr}a`Uz7Ub zoCf&-20>8thEf?|alro=qd=VM53h~vn`1}FL?vuJ7ONY_=A-8>YBm9UV6PeJ>g?w1awHMO8Tn? zI8LJP8m=5jh4TinK1GvP3Qr8pk@9iZaMk0ep~0K**dlu?SVozrL-E;z&xO^e!pD9u zd$yUB(52+Bt)3~1cSUR+a$&Y$wFVjWu&c`)qkIr~ybH?K6?t`U(Q=DY7&jBeHt*mj z*uo`ePWHViKog}jl2<}B=T8TdHKSfOxP^`$mfhrBV1L*@G82MlS4887iezlDLX%ct zfsiwrGtD4tM9ZRxO#4eoR&@N+JTHJgPv@%G?Dgf_D{_#Leyz;%@*g7nS*}f0s&nvl z?>MO$PV9XpSzUq0u%sCPmkWpIO*F{hxbufj{tKRWu30a2PB>97!7ArlCuy}3vZWy8 z*tr7*d6x<%_!k!}yOf+IXQRn~6h6{l1(ul9Du%f?f6b4zmqqYGHR0J&;9r zRm)a{o(gevHvHgiDUkZ%O-&GoOVq8;Px(4ht-@u8+@@0)Ew+3e#zCx#8gSW3NX~~A z2G*I_S!9iF0v=ThV+eKMEh%@?(zFm_>5Q$A zmmf8Bz7lu4Wrh`u!}mc8fwbi$SnodHXEtwqM1rRs^yKkf`~8mD(ucmN?%pQ&R#rL- z@tLU_$V1=){tv$isoTnxlQd@!5EQAUu`Nm zfXBp~VSs^g*nUS%s`mYx&PX+jss8%j)!k(aUz1(0X$i1<9^TL_Pi#9uDUfH9A=yUG z6+TM5D}v1%`zEkODX=9tW=MALSZKe@d8bF=Y|+JBj>^dj`#t3b4#Sb&dvEqNPS^Ha z;SJM#3(eV?W;m1h3*cUk@ii`x*QEnYMlIHE3&Fzl)lL3c&vPHhhfw50#_WIISGIug zN;TL1jmQTZbyI59tmD&p+iKVUKBax`oi{Ea(_p!?|C9Uqc`PH~da&J&i=du*DuzC` zZLTnz_W`1Tn%%A>TnB92jn&c(063h`|2RqBmR$GVT*RMCUD^2<0JYfu%BXqqXk6eBVzlJAucO>OoMvZ^jqWSRfv(vjrf4= zhc!C zMH;d9;dVv0T2xzWVmM>?y2M?pVxBoi;w-b;pH^wOtPc zWAR^}G~E8g)9WJZU3kpXMl6 zk&_z<50p7>Q1f${zIZB%-JGgH=L;0|Sm^1^Kdm=z0~pQ`0u?`C91cl(Ge0I~1Xd;q z4Qn=`&-ahJu&-H@h*7O;&ErhgB+8fXrs0Xxp&$JqIr6ZRMK!4~=JS1I3x1cenSQz7 z4mSOK?CgV)=$tF^b#`GLEEvcd{i-guY8zI^b$PBT7#g*g}ewwkOPPQ_3B`na63e%-W68I{At=s}!^Lqk9CAHH!oPOBTnvL z+kNtC3s(RB}yD-F_?C}~dE&LVhj+3R+zS?~` z0&SGkdDkGuHayXqmeSR)Vz>jWJ?ty;Xlp7O-1XJN@~~5q_vsM(K4O_7?bdXfk0x8e zl$NjhD87f+ttZMxD{c}|kU*yDr-6)Z>02`SdiLQq#rSFQJyN2IJ;d^~ySjC3vHIzF z^NC`@kn*a9*iy@g+LO1_k)e1Px>O`sZu;%YENtuHXN1nHWskn-)Wrb(bKUV}JcP?n z6u^-Km41?;+rvThf`&rV!lRQhqROGcDA-&=>!&Ei^wvbODeo@qfUIcpI#29`KK-Uh zB|S2?N>&PExHpdfo}3qdv`NR2mvj8+6jtY`;%A$>w>XUsK~v#d_&ece9OZo`UxVCN z1d!nJ_?yUY8)9^_rRR%%*s7YUv`>%8zA;u`CB2L#s62}0#yqQ1q0pmSx)f=%%)>Va zf@S%sB%jcJ>~?eMc3Nyoo!{t?{_1vLpWhHFxy2gt^k&zLZd=Vd-l z8EN>rs}mw(3!|MxTlk%yZ;6-M%Pa3>HK!*LJh05Y0qr>PUG|P&#$`yo*5W*6KRx02 zHed6Vvbs0AU-E+N7+h2vWms)EZcv~z#_$>3lMk}1Dbru0Iv=5e20KC$B-Ra*j~*w3S)i9=5NDjjrW*3~G1 zW!#Yer{-=iGWeLnurC1XeqPJPIJP;ow7jc47Z7p0w#zrrWqfNEOALeV)~D*pSqO{ 
zfIN6HCzY<*W2NkFCd@pUiaFZOa=w{OLkIQ(S2PZTSiV#UEYPnQ+Na~?bA$o$nuaE2o60(OQiE>z4Cl!Qhva z_q@^AH#ID6Ma)Pn7SSbQAsi=XbJZ~y=^R8ZVy^h?W$aWvhu`rNsWZ0U>-u;tTpw+= ze`|h@l^XFS9`3_!l>+%_3u9Std{)$eFNOu{=q-rI>GCV93Gr!71<}@!M$Em2H@oXW z&1c*KzRYTJmLvWf2~`@8*Yok@9_63oCGYBTAEo&=?Lz`wv)gb%x*!p)rRt++K6~we znS8(_Nv0(x#_#S!e_fE4HV;wc1LD0wPffO5CacQ9MC?8W5e^}y>{#Q1@x|pKOGJ0# zLZ)PTK^1lx>{de1hg^iWzf3{67v}BWUN8x*C>P`>jDodBHCGDb$x#CnJ=@-U@Q$a@yb?EJ`cu!&5uU;}P$_6H}OqhY&?zL;JKnN&x*mS z6ov|oc(G-3x&zQ@7Om*hB)0ki zJVJ~HB;YGtu3sj=Q4IRCTI?i193hv)vsp3pV2R;q_I+rlC^kS0bLgV_P;g!-Sa>hA znNGUB0c6*j1!2045D1l6G9!8sF3c=yE=;WDQs(md&)bVOx~9^3*Rcf@L~a9ITZ_?n!ilr{7Sc5vS00d)_SNA=+GoV#LiXnKO#5=%x$YVk%bUjN65~qDvx0 zVZRI)h^yE)WXV~?4~wz*36L0_rESblH6etRkTcMHY(>Z45ACo>X(CIOy;2*nj(^gbj~>*QB|jl)UhrBDi6zz$Lws)%?p2+p#(RJ zxZ(~7a%z$~jRC=f2cK$o7Gin9rgO|tO)yZG71jloDUu{JBE=B2TcM1cY0YL5m%w+N zzYY7xh*h-@6$!|iq^m($;&Ts_hzj*vbqqWRV(N}K2rES}aWBeb!7C_mQD7S&lT4?! z_ovDWX9y-&opBHe9C9mDkm)c-kc%f8I*ORZLW$lY10ZXqc|ZjVem}st)Xt(?D2FEe z@+59J6Xp?C0K?Yv5C5T!MjZAWKrH|r#yuWN!M_WfNnVY8}lCy_}@eXY=ctts+4Bk|3b_8=`7c5-#e0b_ausU-n zf%ehd5>PmcYFtW+K9!_VpLrq04OAPKG=n|qjVIY6icVz?B9{vvw5aAG8dk%Up{zO= zh_%>W&)?NZ6erS6l80~#@10!KUo;v>57L!KlNu9srP!Zi(jQw9s6jMS4J%g`Potjy zL;nP;U_ytD8x2gJ^r`63I*LH|cz6rk?1YlD=9Fo0WEDL1)=fXsC`|T#p&TWbmP&8) z+;%t$ZIzhw6}mV883OtoQ8Zq`--#bYR)QDR;gOw)dCxok^+>6;=I8( zMZNlTx${ghRY_T65wd#QK9uro*u1y{i`^*9k}wORfLDSsvY+cCGdh;m&X+FQE7+Dpq2fZRm=CSfZYp)lki7B z&e@XfZ)D$Q>fv# zb`~uQDbQq$PgQ2UcD-Lp``p%hbH|u&*&;7Lp&RvHJ}q1+x(SANw(GoUY%DyruSE$8 z;C;37f8w-RDgKzNANWABstwV`ZJj_DA4KP|kjHH8egw>ev%i9HG|P~Iid2aU{Z5F)MkN^5+%k54MQX>-^(8|FJe&MLsnTckl?WrcZqt- ze}>>*H_+{^hDDA`h6eW7`SoFqC-JP%XUsjLZkrXGS-O(IWM?=VC~8qGAL8Rsl*=11 z0mk12J1LgSP0F*>qF?C=NM(m`Q) za*B(ybbH?D1g6;M77l!=oPM9;!2p3Y9JI9NOD;0+9knv#%pQ_#ZW2;|RZf}>5_*1( z!tv5&{EQCr@hrh+M#1-E@$;M1%ID3`G&od6uR@Hftk7-lW#Qwf7B(?#1=S}Y4vfj{ zTn=zX{r>HC@34CJ=GgzSnBP$ub<*}H9rUhGu=*N_2Ld0LcMeT&+ppXG_JFW&^YbE8 zm7f(8ywjlN$RKi-9s`ly=|kMUG{*Ha|D1gKt3Y?Shud%@_OrIz+c~vD`t8Q8=xcuA z;G0$!v!VYgi=^;T&F6?XmK)Co8d4U23Dwl}+#b=Lw{vao`y!5W|Kn4=K4)}MkE>Y% zjdc+bS?PS`n&$McZt}ib@gmU2#oG6F)7N^_?_ZeHYAW9qRqlH7Pcd1yc)pESgmF|& zg@PO_0==MlZOTdqn8Iszwj`-5 zKLtetzjTxQ2^7NJ5l|jV%Kw_J``H_Hhq9HF3OvzE*s zj@aJqEfwqMuC32k3{ZL;xGco4af}Xv>DfiY-GNOL(RXALdaK_H>=6BPT5sJFIn1lp za9>oqWFRBz1S&eO^80?y>+qR&mF~q*v5XG~fFiiGqc~PC`yu zyj!zG5k367Yo;H(d-UT`k^9A5q+>B6JnXib-F^>4L1TzSi*9@rMyad1QO6bI^UNPX z7$%@rc#!#jh~fVC8J_=+;n>+YIsRu1SFN*^cEI7(yP~qoP&^YsA#^j-k!7swO}MtERtn{cu8LWT+vXEGk4Q!}lAAWCLud9&dh) zY`z|Aywo{QSnQ(82q#(85n@M#(J_|1IVmOIQ_PUf3`8bcAUIzwmf0K7*|;ejRk@}QjR{sSudjmbO@hhp3kR44c___k)uaDXs7jYZMU`!45jsXo>g(rk`dPJm9#EGPK(isz&97 zZWg*Xc`fuvIv;s8$cG4glt$V&%vhe{apG4xjG+85*P&NGU5F(G?jmiuT2t?SkZPi{ zRJF>7g^!wyrjDpFHdk7nD{efV0DrNzc-g1poz82V=rhmxi{&3M3YGzc%xUlHJAZek%7r+GA#{QNSTz0@s z+E&<8?EuV&;hnDW$xL{{43ST>Ww0}$&NfH^z+%neGT07gQ_i}HhvC-F-UjkJi4_?O zO`$y4$58^1`nVz8P50mxwa!K#3`Vr`h&dDeZ=CAYES7mbf6;^hvlI@O=+K$8#mag3 z>k}wM20RfrA+u^FwquW>{EVpe$(q8OgS#m{gf*2)3iR*){Q&i+x;lAo2u9xY1YCRu z@)k^B&nV>;p^6F^EdPyD;+-=dtw4RozR`!^WNqI&tHJnrfsFV?1_+SU(=oeCT%#~POM>`dF4IRP zTc!ZW828G`kS5C{H%0f6^D4&buT3MF$qMOfxLCqKB4RO;6Nk@P>~xfmay7A$(H94~ zRV39YvysWvbaLdBmhNAyXCH?}N#UFoA1S5Gh`P&|ryRjE$-q6pQ;?vxr#DFwh<7$$ z7(c%N1?&1XE=*C5c}stzpZrV|AKd|%8ezP6kTAbr%`k14C?5Zym^p4(2&eLNlHL*i8uO(?LsA`EcMm}_bXTosFh7~)?{%S#@ZfBsC7%46&t>; zaJSO4<=GTs{HS~!?+I3u1+amZo(?=?_dn$%AA9kaaB{MQJ`>8XJHINwPfp{Kf%ktSS|+^LEUlX|k7=jjU1uq1$K4+~ce!HO8~V}aK7*`6uIvX=qc46R!NW=UjK)qW$AmomjVn%kYa3-X8tgTpJDq2eV+31 zZT4}h-gdD!8~#P+=Wl}v+D6(sU2pY)4*6oXW6Lq(Eh{xmGQ({J{Yz2II zirMY2`hLsdvU92(uba^V|C5wNS7^vd#HMRyG^*Ndq4^%#K%*EgbNKiKqR_A;}Kh 
zVQsvfCXq#-v`}5~3LrsfLMjXW;rLK7LtjAL{6d^$nl*#ZU8}+9`tTBOvAlv$VV22# zHWLx)S>d{?zm^vtuLk3tM*~#hRtky*y7qS>YeknXLWn%q9hw_cGu=4V)2UJIT3M)~KZs&c z&7C-n)LHI*o=fcXK^K4*!d$DT`?%}0Z=N$ zKapurn&Koe_4P2@-kTgv#`$1=5bzIIj4@-RkWs*yFU%*lNHT=t!u*xZwZrFx&Qy*< z#}L4WDA9~8VA$l`%+8<)EBS|m z2e8sU=7^=YJ_Ui`@K^XCyCFlafi@ck91HVuBiCoArWIVai?f`Ye4TT35W*@#p9XhhIK@>wHz$RQs5sQ5VsBj^$5vJp*zG)g>8?%MP1&7F|p$JhEq{t7hly%_UHPXq&9BM7AqB5fCwRO98x zB@|kmU6#Jpr}e5*NE`%CQG(@*50>H+6K)-LMQ^4nV0s*l&K0rs5^@^6Cq5A1!l?7` zwE$<9uJ?=mFuFXk1G+ps=^Q1+vEu}ZAUYpj<>{#gck6>oxG4H0PQK-3r!IF+M!fC- z9k(*!O95}U5@RKiPvzPSG&)IJ(wW(Xqnm2+2}B%`yq4n6OwI%OQR-!EGP2;RoE*Ky91 zXtGKBmQKH(X;gr+`8TS zKv;7zEFr58&0{Lu&9$+@K;rCkWlUTy;@3oX+L|j*PReRxoe6EV09&yKz|y1 zKvI5R*-p)KE*p7TEy1LA@S%D*HdIo1+49Jm@S~H-MMtcIvhFy&DU49uAKHu{;W-k}A?Js6A@nv&XKb4H;m_LG^b^7}_xSDHa0Y0Yk=ha!x zb@Q1}XYh|3FAhcBD0Y2Tb{*LZ*wY@q;qDv4q{L+m5^3m8hj~lCv#3$L*1~St@x#Ey zwvZG2bH-7&9K7{_16Rq~xUKZzU&{(laJ}8}!2YBCP4j`~0p5j2l0=uj25zf3eX}!! zD>)a|wjrjn^1=9(x@YfO(bBM^kmH>E6Fgd};qu<-yEkm(rK~)!i`)5tl%v5CeFdxz zAvOmH(axBeD9q`AZ)J5k6=8)Q`*}y59wI#g!W;wowG6WxpGGrY)Y#EpJGYbW=D1JY)>Xt#16SM_K38^>o)2cSypXDpUnua6pRa}S7v z|2!5-IB$7`!}7?QmHP5O@qCy$RH8wx$afP9%4I&^|6IA>xj}J@&uS$d2NJtIpBXu* z!Tv#yQJW`8@A-Zv;!0f2b&}DznrWk;DSlULd)<2;NlmX~{GwfOA|u0Vh{`b8*>2ot z>4fX*aOr;Imp>aXTZt62Jg}BvpE*Lu;;n$XH)DExC%iMDTaj|sMwW%m6(Rl>Bz&du zEO#J1ISEqo)6D|sb=DB{<6q(%gN@^(6x-=d7Mglqoxc+LVBgVNg`RYGCF7_Oe$5Zg^2qOOVM6YES6h~)zZFB;1=Y_(BrTSXj25>9Qp1KzoHYF z+`^;@3}aw|K1O(lc(g@hbZ}D(XHQSM=1CPyZB+S(sJ|BFiDkoA*LQduc!KVx6rM!J z1d6};&(uNox-m*;ld%Re)UNHJREtAMoGig-Wv(AyhyfRZCth8=*%nIUD3g}3mW-EqUDtHw71bk)eOD&b!>+$8f@9iCHMkmy}3?xeSTGOfLU#7N^1FIWks5!%{`AbCX!a*ZlL%32FHh3(~92f=X9EqGjm`|GYp)#HsY}5@efE- zZc};QdJ5!h)fp2`9$W0FTx6#0rGpR@4x3~72|s~mrOZF>RiMd?_uiR&XKT(V?RT(4(y5;olnv*%T3Im zuT5mk%A9>?9Uz?giy``sTv*g>j?krAdato0(wls)aB?c?XAvyM;J!10LD0*)!+D9K zJ&MJXC!kPD?RnqjU0!-wpTs}x!~S;7;!Jxb30hLAolr2$Oe5r=zQB8-R6g}!f$_o_ z?fib7H^gTdWy)XZA&GI$Yi=^Tz4v3JIhP!askYUpL#Y54y%b%^H@AGA8dTwzB(Nf$ z-MMBWx?!{-X|P;B3P1o@7zr^@$I_(RAID8@dwrw+Idb8D{Yopne$s`nGZT9$u!e@k z9E(3@bT2FzIIVmg;QU~@*pu>3<)jeo;o#iq z#4M0LCn2GcbBU>}6Ph0U6glewf3pvh4Tpm;w<;F(A6f6n1|F<~K&zJhlD~q!`6&_m zq1M6~H8{v;GDu*Dq&IW!g=dTBI@Lx0{CT0#Mq$8*DP!+g6NIeS?XeY^3rjrGywiabm5%KP*D z9+=N&`Chm^^Sj1da$fc%y?gtYDK8S5QzP_>hRytR@2^#Hp5{8FzwfdPy_WuW@mhu1Y zhVzS%pxv+QmzR6a0+8%a|bt~l4Jq%F% z{h5ecNw&jF+D(V)^_N-6rLO=rw>N(YPz3{}`N&C#WUEnGRDWXe6$5vom7H zHUt-pmLvVL@W}a46F4NlEIc71M=McIJc(7GK^z-=(z{uaP>v+`;uB{G&eL<*x;^?8zN}{`{SByj0C?o!wu18bqFc+_xN<3?Z6Ra8-!vj7c+! 
zjo`FcoPu)6BJ>;iTfpYHG6A@>*tn&PD-~tq$b>}0{X5`i588+;m~s;Ylz4v$hZ-3Y zkK9dIA!T6DJ&smlNHPF_s55O%E*UioZg90x0^#gGB!I8PA|^0pjYO0R;7a|$p)Am& z4Nu}qx57~4%NR&pM`#xQ3=AX^HLRWBYr);5&<6HsaeapCjR4YB{J{w9T%vHC?`dZq zB^?=YI256m%g?(syT&rsajN{Skkhpb|BH!_7y>Ldx(+ow1WbEX>?@N%DJ_i)1E#bYRXM@O`zx%)Fa(tAWE86aVWHy~cmf9qSh-{&{lm1&6apkd zI50t|TD*4q2tA@aEEb5i;!q!bz$ke8*42ek0hle6Lu?>uLJZnMLV;9;6CNZi4CtM7 z$)8Sw`wBh}lUp}E#)S!6fIvu&?;!0MCmn|@AbMoSGfvANK0+N$$!h`QlmHFr-baSD zmatH9J2a96BrT%9T}KHF2IRj95&hsGt;Hh9I)#yK%BvW|<&y?^WS|{d_$6F#payIJ z*-tYoEy>}A&0X}AgG2k?=UXBh6^P$5&M@jpG?|wmpVAdLWh^!hm~~F^mQ~+1;-LH| z&;K--(S;2{5q-po$r|TCK9Wi?JPZqTqmeqq9%VoNwkVJNQYI283J<&m;?h&FqKl?1 z)Nqm$Y&{Lbo$21YDO%#Afuc|jyw(W@;XabYig1?2ke)zK=`k-Ys?%h0x^|PHaL~Xy znn%;5i)S)vk0Irlyt+cXR^Hs9vNaxwu1+ZXbt3JyemOq80hx`26^EYllmy{!WiGd* z@ZtXZkK-p$BO*Xu`6G8JsrPl1}iJmLCa%;Uy%@SQ=I zzR(T|S`8Z|Hf#@~Uu}9jELX_NOY&`jblBc_f>RP@#S6dl_6LCeZ@QOJM8#b~)X z))(D2rmh_O|HH#q=}oW7R1Oh|GK11|IQi?5hM|RxJ{hCXz&1D|*}vTeV0{Pp$W(?< zbN{~erSWi86gmQ5GdKSs;!*TcLeONvkPUZzpdQ%eMa^b(v&$781Qr)OCB>WzACY78 zN%o9(iFr*ucgj}md=cy@V#b$TKMZ=6@F|>maG=#p^%fh1Qe`IuTdXqa;Mm2hsV<0{ z<=OQPqwz*~rQ!v^oxdCh1t=eOdh^G)9K9ujC(zV2Org^)DA}c#owGZ1h9A_7__5oX zrmXXAH?A}h@t1SGkg62~c4H(aQI{8o{6+8@Z$EU?u1%fF!ZqVqeduyaUb>1DcKDPw zw0ONcZ!AWTbZzX+!WTDeCXgp!efW5TrG=R@kDZJidbe<;#Ly2=RK?on3~fM}=ekKR zUTFK_Cx?eG|M5tA1Hn)>232)Qm(@hj5VBR}bB+a3icj;9%b-#Hr-yBR(XFtr1NaD z@&60unc)8Oxsfmt;aOiw;ku@+3|)3A!Ac=W=ehDIzI|h&u@XhA8I#;yw3xN*@&lX= z-~Rh+RzkfOQT7x<1?Pidb-MBf^i>MQ->N@7OG}s_v?Zz+ki$qc?~oz+MyBrdEZNN# z$tGJRq-S~+R0lO?FAMWxh)w;1YZ1albBwiV(LgK23q_AP>@~GopFFv>5D|N7_6pFcu_`!XN}ce>rl)CCFu4$LKj)4H7_*?>|z5G8!JI= z9LZ0I)T{QOfex7r1`$yItU;GnFl|Zs9;F$X5Ivsp{gj-q#jtt*6gEXjv~6~(GK{PQ z8lK}5HgM8-<=LtoPfbxby%8^Vk@Vsc4OKB1rWt}1BB)#stu-$k*zj}PDy~*i*KX|fT-q9<>eBAZhdY-D?+8D*bnOa8-y_d)AHp@^~<<@BeR=7n3b~0)WCg%y)U);MIvu z3A&|e!q)6Vg&z4H&|ZkgoC>*&Tzg7Mm&R_%6x*qMV@#S^ub_wC7f-3q^!(pyoYkJy z;*hFHchA3}8fQKByT`MQ)R|JhPi;`PrP_WOG}uZ;fzfNe!HLAqw@=1F(3UhIDIeoD zD%8?NrEaUXE#W#{e^pOo-u}#5!b--+@af$*iUur}K@%6U)gT0PYoE2pFGcxVx`9xC z?9WwT4UDeo`UygPyk%gnPLx$Lm<^jwTaO^^y@u2*mD24LaM{$-1kpDzvwo?AI1w?P zeQ-}Hx|z?sN_7e8j{LG%W&%-4egQw~J)rRAKu=oFr5UboS&lK8=zpM8Y>XrFjGIZ; zlQ7u%1=HV6dNpYKCTE;6aVJ`WDR{^9!PYG~KH;i3`($Ty`ZiWRIDwmP-cl2H1QX>y zX5u;uf1dM}kel_%BTc^_%XOZ88uOMt9#lzOFN zci`8NGKHP$-Nc6@QK?v~`Dc%4^?baXpB-9^scZZxu;2@G=a^fZlC&Zd0)zyfs(xzV^W_&VfU(Iw8IK&c?4>bknVph6V# zg;OahEzv#}cVs$|&=ukCc}C?3cl&)my6oRZWk@fAq#9>Rl=X4q{UpWJ;^CQ^QnKlFayf0(kCT{csPawG76ImhC3 z7a$0=#}Djo*@w)0zqp-iTcbvo_P*x~U4K{y9tO`#?1+&tkJ5|>>;(rka)ui<-jN*7 z`{k)<4t0)tw-4*wnXc8EZG|QEwMNn`lmmtgrL={ONY?~_mM-0#0$yr47xSg*Evj&o zE47A9$PT*QZ5ZJ>O&>kl^V)Z&fym7kJ)c>&;4NIJPJ&wcSfiD8+}&P2o%wS1%H@F- zb=%)?`*G?A{PBoLxH88{2!Gn(*ecEtoPj)xH)kS2eqtjGWON(J zu@j8zOip}Fqxy)RP#ZnO>cg)&!~`D}`zl#6d33Rn`~mGIyIjBOI^$}x-Ey72@%fBt z)|&EsbShF>7#@TZbgO7fc;Rw7is1}-)ksk#7X;88+GQ$_az1bE-ioI+1E`ciVc~5< zRSVKkTO5wFe;?QNKnix(s-ljKc!w&8KB`tEiPwPTuX#IO9Szf9x#pD0u3VA%xd}RH zdq}&3#^XsX7Arl&r|a>1diktC$#vKN?sTn+7h1E18LVEpogp_o`h|OMLMth*6)m@_ z%F`YODM4}F*w0Njb}I4x-uEb3f}hwoL>462<8df2AU+jz#+RSZ)`aX$ztaqW&|9YNTDCJlg@zzSaCT4>ON}h(nUjUN zA7}=EOPzo0|EI?N|85=o-;Fy90|(>(Y}~svrxG?>5PN2n@*Ihj{-6NH0i0FfpwwPU z(sUYIWfhH&2kbk#Vb0hOlZaWpv1T2jI!-VjU)aVCrt6GN6H=~evH(8*(5q^0Rpx=Co1^=NF>_^MkhRoD-GM8B*&1n~W+x>PE^&6AX?$S&ww zxPxQQ@Zx4wfQRtdbgM)>(LGQ(cqm}7F5=efhlk)NsZxHM@47`$XWy@ris)8&j$n5# zX_`f_%2u=bmNio%CjuAGI5oGtrUbBOC;dlXWg}y#lh3l6(kN1sT;CSBFihI+3dILC zQ(6iyqPRPfQhPEy8?vd9H-n4=A{6XnW20D-PL;TLBrQemp^2ntA$f_qi$9{h-z*8` zJ%(Y^O_k6^11X)Y#&P_&Sc}uyfWJQ=XlPfl8!AuEJ0%ETN6@JqNJkuyOaP36Syj5b zLr(*ziSthH*K{H|q{OZ~M1We=UWh zSURH!)I3K*X%Mnk_Z@tVYbtpra)=+C{HN{~H1^0bup{ZmD 
zRWm+BvBy`o&VOHDBsT(Y=9H6X!3lG=R6^nX2pnOYxujGeB?*vINf(vuPoes59ASH& zF%D4->l*ir%r6Zc-S@T|k0&|x$thQE@*icD6gyy_7Qz@n0-Zh`LncS$2Q-6FQY3rQ z5or^%3b_xN zKJFMIIL|w;W0oi3#L{vjzx)80;6I|C~~HW~jN62ZaYhkqAQw@D3rbG5A(( zFF7lrB2Sn_9!4r@E}ZxP0x^)Hu4pSs^Iy6u;a^=<1WocleFP-1_9!sg1}Af0ADX|@ zp5d^EkUV{a?yv>qzX4^Ff~zq_@p#fXepLZOEnpAMYSzurqFjEC^?M3`-W z{paCIMVpDAcV339( zih_xmz6j#LxSXU-{tyz(SMauUzCCTS6Jyp!L4SQ+`K(uhoIY~yXsR9CWOZW#UAua+ zlOk5_4C!#|upwdU3(k`HtWykn4N4bjY_20~0TurO@j|rGj2iRTvnHd*riU0HmW5K> z6FI9Jv=>Ojf3{|Qiti2LCW_tdbGxM3GykQk8b6DhIH_!KcFX}_jv|R8t=wfgi#G&N zXO8TeRPRZwS2Gg_Btl+ZT*6=AzEchkf+LNArJmHV@uy*&RST?&;jQLH(cu>tTOO;y(EL`H&9IWU5I=_W%& z{9SyabOKS2?w4<)XUp1d<@#pLEG-wDw5~DeJ@pbDJh&n@m8KhsNd}swkWJsQ!IREo z7@g=dWnq29%4pUc+xNwB?5KlBf0}!IDFThqyF|Gx0~ta1H?G~cs{pfHIgdZ2tg--t zkjAg1N<AKEl@nO(lV|fV!UD$@+dHHl}mPK9- z!imTRl5^@XJcH13u2pEEawhibKawi2g(LFg%SmmrNglq(D>TF7c&5AG7?mVTVvy1a zK?ZV^Gwab&MBg-CsZIiz>nYT0p4>tT=AuTNhsuUuCcYr98%s3igtYti|K zlgw*R7MWO0_&{jE+0uKpGRS*-gzkJ~FEX&B5B8Z}*zUIGrr-d;+q-c4Wh&41<4cxR z#ithTtH(CfU}t1KVL|twITX>8{Z3%-E`M}HRXHV5E&5sTBSw>&O;|Tp&*_dhD;;NX)&@$QUrfJ49Z0Y zZHmYsK>Bu%+Q;DP^?Ghvh_4^W)@dlqVGvsH`}4$!nq^lmNv3(6H4C*|lBr#*0Ec)pE|XgYfypDVlAM`jQ9ogteSa$S!QwlDFBn4&YiE&6}y5* zD8KC?HRiI~Nyx?3B`JBmHTF9@I;Lfl1PRu!w1ntvA^|x}M5tiPplS&*b^*rIbmM3T zSsWaE?{@h!KDPSSB1Aa5NK~ic5PweQ(>mhVhRo~ z*AO9_1mg-1lz}p5B*~z*(Sfj3e8qd32~21SIlTy>(Hzc=#{R3uqSu}Bfs2s}4sUJL z@5u@$_yifAaB*y9qCP>!j7GqrsW>gd%SXm!{*#0EA3~4sVB{(*xX98Yb_&W=^yYLd zB=$}qF6t)s(@c(G!Tlm2$(`2E_6XV&bt>{C4@6b@et7SFjD?^T(l2$iMa2m2KJ`ipE0Ya*mGh?5pXE8QC*)0Ik@6(!OphQDmPHKjup+053GT-3& z=0CP0DVk$;-$bO=@(H=U9#c~<7v;VnRjE%#=LXI2bkaHE++j>ek;O@G?e6^1J>_Ca z(d|y@)IV+t6vWzp)@!!$?8;mba#}Ji;aj_LNSsCz`1-etPls^pwlDuE+bFwQ`d8R; z@Sor%=TKKTn{RFtPq*9{O;XTF3RX6AR%#JbI;_pZzI=qmrSzXUnt|8+)25vPN$AVO zZ%_Oo3GavRB-0bWE&KCYBG4&GmExZ3g$<&^I2Fd$$py z(ygHfqcCwRl4QCrA>$+1%5tIOm(OcA9DnG=qxuEWI0ZaRNj|I9ta0n6v5~i}yTz)r zP$*#CsWt8H(G(;=RR5NQ`EIl_wFTVFVPZe$o!?K{N|$8{(P5-lY%)BRlB$4wH)*tz zGiG>u{ww!>+c3DS%T&Pwx&p9H0`1e+-uD<<%qscVe1W`#|)y zFQMvjoP!tJRK075CpqiNvuc@rq^@Wr>1J((U%d7vkSM+W+}EzP=t^&VWO|PgB6e{L zUkEp1hN(MmG$N1mM8%e@MbX@_SNFl}5T>d9c+Q^W-;;_p4lVd5^L#pDbsk6jQ}JP$ z)>uV7L+mprbJ=xd|v=NO8)|+QE zB~K`g2N?IkT)Zg0tAC<-UunB|(Dt!-PG(Fx5SgQBZS*@TLjs29BY1i)-O^$OvnJ2YCl;lgB@t9%MZ_0SL& zRns787sSg&8b2@_5Ts{Yr{X3-nYkq_q;x2MkV4f?m0`a!LCI?)dp~1gk7X&GS3H33 zr1xmdd-aXJ+*eg`PbOn$e9RjJ?|J-&b-FsOuS+Ae3T|iZiH%D4-Kn!h#24=0iR2iY z{$}TIHE%?EhNq2y!`b#Mm$fd3hycpNYg9!O}AIswaOMlD>sPF=WHoB_#6A zM;TC{ILel}M0jpxLGl~>`7%1L z7%NGgUwj}z`I7p&xRdG@ec3y8L(IsN=S9zdlFUlPKcLhBph0Zrde_9;)fVkl?5hel zRt%eyjWJZh&Q)aWjVY#(=uR^7@d9A-YJ(`spHSnv(NoBFlyEAN@_YjTG(RhTV z8iRt`Au6sr_Im~bL%MO_$nePhl6{t9*70-P37k9fk^6}@s*fc!UZ+34LVoMhsd)o8 zDfZ@S-kgzK{EoHuGBsQ`!<%`=&FXv|^;IRo-$2{LOZ68t5Q>(IP2IimT2N z7D<;xFAfebz)zSJwXQTjQnipuQWl=Vre$j~4q0uknj(JQzvp?*_Idu9Uj5?v*hMv33Mx3Du!gwHX&_P#maC#xXB6ElgjBXvz-cn7KC6zY_EvsWN_E0q z?k|&ePC);fj|%YVD^E(B!Y_<@*4wcJu`shpu~4(tcD_6FtUk@y`fkCwwqDb@?aX~S zy!3iio!!aI;PI$ESuemXrdaA}bgGkUvHP$b7NX)eKWA)CNL7X?maVm`g3H(8xC~Y^E{cm-~k45lq3?q23R@`8^O{bx`)x~ zd0s4O#Khu;2>_OY@u&OfY2PkcS?`ro5CB!A4t?=)=QAZ!eKQIZ@Z^5Q!gKa$TI~b* z+Jz$Wt^l_=+6YzK4+Cx&hozP8|2c+-V-_HNXB7E%9*i6JRj(Y$?J#6CC*t&cJF#Mb z0J~^T3!72dg`Bp01@^p$i-!KrvNdh0Ln`MFRBc9rq4o&YGEwb>?5NZy4!0Chnn)eW zhJe+>!^hR?DmcGL6q^vH#k>6$TcRJQXl}UpLg6jzD|)}!aEfShMOIu!Hqb#p!CV?w zZ5v%F;e)h7%OnblGKI8SvE4}_;Up7|mdq}ah+XS3F7#a`>_RCCQezw9M*o_Bo4U+< zYQw)~WBdLnwAe3MuSTak4WI?VRaa4~f}=)&M!!yPwaO9gM5Y{BsmP1gPcaF)WjW%A zVy7uGLE9X+P-1W;mW|_a5M7cY#vvXiS@oZufyFm4z!HDl7&$q=zKCkgT6I^pj>%GG zk+V<7zUdWrWNu)$Mc3=_VxFbsJ1H6(y 
z)Ccj+euAvvC34*wSrEDJ^*%o}q6i)kBC~qEC9loO~2^ibkuTbSm95@B>iRF?l%JG?P2mBJM zl=OF8;X48Qg3(TBQE<$m=Y_+%cEneo=Y~Aq>*4+5Pp|8w&4}tjP~d=?cqhNhwT4%7 zs1g(v-SM*V_4p`?E7~O$LhtBWJF(XO_G`vsm)>F)VNaCg1=7E!%^wsSzJhl3Rpm7r zWi@gUoOs8C24vNXW^8Q29>f&Zghgnfx8L-SEwCb3CViowP=*l0v=zL90 z0fO?-LR3!Y36Q!7vo4fS!}L$PE)|b|G{65@4KwT_!3s)rf#Ai_ahmjOCuC^F|oq0pZWYoMnc- z@OBjt=GQ$>Us)lwSN*oG%}7{D#j7buU^Uq8TXx`~Yph&{Q*F2jko0}dRD**K$-wOX zX_>D&;_p$s1&r+q@AAtsMEX3Q9wtmI(92d~eU0@ZjSxuDc9{d~jk-hD$ivJ+@BMn~ zjm^q^tJy&Zt{5-iVxtscSR&$$8e(q-@P)!*!N<>J!UQngdFL3ej`R^I!FNOk07}Ah zjTqn7_Z3+4S3ng-lQ7Wh->mGt5qI)HOM1_D4%h#V^*%`H| zN`g!E_Bf8#ZP93FHCWJ`;~Tchto64TCB%+KR|1la`&{dGYz_g~hS(1`zY3#THVp!+ zpBp`KP|q&mJdr?kUgCr62CQXGbp@?5>sNT(oY4-u;2r$k3_VP`2g(%DPt=K<`FIJ_lF9W8GbEvSw8FS&>+ zOrIvUUTvO#;lqKr}WmT2qViYkQ`N-OSy zeizUwOxC%rmD?|BD7mCX3`oUUXZg@}Gdry;m&fkRzuoDc8d#VvrGxxHBb>B>YAvqP zM#~bk0Ij2oNl@)9H!fFb*Yf+>RXGDCCG1}b{hgt3MrP%hf3*n%k+n?Qek6Fkrq|nXzXC-qV94O`-~hSf~3Tz?MJ>3730mmpo1heET`nKE@YM`Ew} zxwbjoJ0Zj{OJIwFTRUQ0yp-y>^(=|F5j1mT0`$MgArW#i2)t`6wKws0UuWI1tAR{G zjkD(>!?V}W%TaS5m`q%pI9v+-5zvD!(i+?6;`H7f8_o@XVLA_@PT)(An(Ic9#y~RY z2f^tqI&S+{`5NcXB*upXWUjvtIY3_*(eCzw0uwfpsXacOKKr}oJn2STaI0FeS<^&No10`64=DY zt|jk%M^Hc`tWH=Y2#l{yGnir3Z8_if;-kIVLrw^jpiR=od?kcaER_|y{tL2ZMbCJ> z9e(gXf5<^0f3>eTu&G2bLs;04Udf0>Ia5Hac(HtE>_|~eQedQMkYF|t??|e_EEUPX<6gdQkI~)v15DPkF%svv(@XjZNVb7C@7 z&PjInWYb*hdX2!1YW)0~uA_s!>WR^Y_wn#@-t_1Yl)g@7p7_Xkkn&c#MUhyEf?%Sq zdejd`{>p{>E|ZG5+|IB|I-~K(HYM=@2A%6?#jFqOe_N?P-f2MQ7qWK}Cbmeg*22 zt}v~_z7(T=_dY*{%&bbOu)LHoz3(M)(f_qZtUN?=i$P>Fjmu#Qsx$iwqa@S5`lP2M zgyq5>*De<=(EVY5C0Po4AI2^C)|h!d5EIEMA(2~)iWVv&IwVo5STK$T)nD&W?|`(T zYec@1S+!FTVoZ-?76p$>TZvfX$9u(x)blw_bP!dQI3+$hHYUD7o?NrtP9q&Fr`5D= zLm~kB7t^>##)7r5EB44z)}}g!hYU|TdJUhVZ6;EuOMZX7X0PV~f3Fk&mds0^UQ5fR z@iWrC#i6h>_$7mo{BOE6#gq7%k*y0!YVNC`A5G%%f(Z_uKc5F@^dm+XI(@%tlG|-i zwy8cCw3DC3jkym9e*Y^|)f4jrEMVAfdAeOM)*dI3Hm6wVtCE&1X==FkN1=S`f>Ov` zt_X+-0+x>%9YP7W;j)I)A1EUsskps#fc5EA0tEYTysDgCPK)4&YWA57d?3&kleD`x zJysWpoJnKE=3kPli}V^}>bS<&cf$;#F>C{*V!P20(BE{zbWf9E11S?7=Xp)dd@!Y9R2kea1! zS)*{mZ}tN0V|#*bq23sQ7o?XTTMxzR$r7)1)*K%(q=TI?gkpGGaBzuth{VYEhwtRY z!5NY7>hJVfft7)IsnExw*jw+u4vxI@!#z zqDljyr<(iuF+^Jlx#f5V^;y&VIsl~f_}$~Za|8NUbB=TN%iY1Qh=Ks84D#UHNiOQe zOI(MgHA&&mp!9k1K-OmZg0xqB8`Lid*&E!MYREa|<@};tIug=b#9D1D$a zi3o=sy_MBL&6CiG@8Q;r^@{6v(5cc&I-O||#f|>gax6BdG2x^ZE(V;Tc)`T-qbC)w z0bO+WRHe1nfvjH{hG%W@zC!tLzk=%~%U}BMq^=}i_GS49^QynjwA)G40@@V@cHW4R z#>}npjrx$_Vi7~2_|aQ&|TbD5rq;D2p# zw&EcY_|FXjbs2dAAv;GC0v0+ZI!0O}1{P)lRwg=T_Frs3Lu;_n3@b`>|$lTf4o{OH|&CQL@jhW8Q(Ttvvlk>L> z6Fn0X?e7j+Cl6a^19w_mC*uDq^1tf{n>ZOcTG%^V*x3^Nr>=paor^Qi|I^-=$5YjP z{~Iz!=FGauEY7{xEM%VNd5B#1UR*Ahd#{;7WXh~Tlrcj|g@jN^MF|;05i({-5heT% zo_h5B`hK40_xgQazu!NPm)AY#?7i07Yp=8RK6~%8-uv+LZx{6QZ?|SXdkXRlgk+wQoC8kYEL>JQ(+Vzyh4A{OE8rpzBGfkL|?{IEoR6+Lev z8rxJ30WE=mn1b18h$h+}jm3dY+?xn7zz`u45JNEJ69<0f0FT0WyFn}vUOrefMD0&q zAyx#?RdoC?C^Tf(g|V1l)!0q_RlA+q{k0-WVC9gzuJ{)T+bzHX?fYx$pVix~$#y^E zgTwDw@IPtttDqfC{!;3Hvnd)G{q%_6HbyV6UmXH$QAS3JnmDB2_D#^!RPh5%tEhlP zqtSA53KB91d1(o0DS2551r$u*i8m*8J>?X||=p8spEzctZbf`84m*BXC& z{;#?I)ACy$2JpJ(U7SpI#8S{5Hhj9QZNNhwRLK z%>iR@vO<8sWfXRDO?=SaYRK&ov;$mvJ4C=7(8drQ0TVwW7UR85D7QUjHU~@*ZfI~a zf(F3B>yONxM36*!x1{ZO0YC#B8Dnv7Dk}7#$~YGfdbsp|{%ZIi({MT2JsbP$#uZu` z;fA1e4eK@^r|Dw(^;reyUlTc?&g6X*uMw{iQg8TGyYeyjH#U@?ADR!NasI5w+PbP6 zw7SII{njGDZ`wab15)(Z>!PE?`z732=rPfZUd+<#;{jI}6sN-4h#YkiN6U$8LmSgL z*?}jg-eAv+Z~DIx5Z*uszhwyQJyi&NOx=7ET6G-9`fgRI=1ua&Rn=PIQreuskfovO zrUStI#^ltf#p-CMvcI9uY*c}P$Wb;M(^^Y*TCYr|nojgwN*+_sJ=!WL_@%|M$NECgmdyzA z-hED!2Un9%KivT1NbyR^9T&${*$ao~b282f$59U! 
z7tZnubG3WDya5O~S*XgnlIa*w0T(!JWCMT$HooE0wuX zB`WVOzgu@VtyXb-;#zze&gTmBo5gFq>U%j=*Z@mYO%~fVBR0cKt8+C1y-)l@)Q|B9 zl*xWJo0|nlksMXUjf)yTSR)mv_I=Ts`r~J5H?v>%~Rg-GkmDPP&M2kb3BDBY6eE{=|FF$R3D{)*` zJ7g7I#9?Sg`O-;+srxKhb8Oeg#jy8Vwq&?78ynw(H-a`K%wr4bJ}XX%E!>plocJuv zdz@N(av@su7*LgZ@?&@Em4MRQX%XCZL-|*ZFAQm(v&P+dC1-|`Jd3%K>AS3V`z+L; zqpd;qrI4A5+A_BCWT!D>TS;R^tz5Q9lv!D5pJgEEhxXe#@j74`~kknDvV2MGqo3u86(J`67ljD5z+_|DhP!;mn1y zI3W?2{0LdLidT4VD(2Q^NRECFs{~`#J}#*b3u!F@Wd+Y6MOeMbHbaXjU4F^uO9KGV z)zOA3Q+vnKcMkZqcVAY{A}#vtL$XektX#=q0f%mX&ORXa@W2~-AllS zpq~{5sEZrFCvU5S85Adl5i zKPHrvzy}YOh{gljSa4lT6OG({*!=NukX88Q;b3VAOXGo2x2>{$duUE3kaBfY4txH^ zBd2#;!VB6`_om$4X-MBbRJv>M4cj3aHl|Ud6LMf+h*r{pLT2!T8C9N=twu*!U)UzY z<2nCl5!W?4zHA1i*aQaTKW}>fnEKw^0T&O8bnk+NDS0RZkB3~;I+Q0ZvYTP9LRnmU zRC>Pj#z9ZRk~zUHO&mSkLH?Gk@o3`PIub5rNgZ`MtD_#ocWK#zp_i-aISmdwaCnv( zpVT?5{qD9oo%Ef9Prpd^GSZ72raoaL^u^GOUYKNsi&WrY;a949zT8Z?$n4ITy$E-=F&q6yK&?tF2=`PeCs}^ln*qiTvLxY12NG) zmZttZy5)PwdAlg!ILhA>KgghNf5>A97qc{*9-=jU#uOzjAs*PH;jXOrp~z z+>oR7prBb=ow~jkOn;?X|Ha7qn(wlhQG(SyI~4|6$BFuses}A_x?9Xv_uqvXOLRFq z)o6Nn(%3AU3tw?qfjz&G)QkJ5;uszuosaw-mt~OLF$?to5^|aX$p+~c{ zw+*WcM%zos)JasR);top=PbK?H70{LQ&)mQk2mrjPe2(Jmi77SCWuYj2A%%2KIPt` zQ$Ic+l%sL(nz0{Rv%Xi>00aNK^)wz+lQ`%Y>Z29;S~G@+z##-o@EmD4gHh9Lmta>~y9tEUvmJhT;X?zbJQ4}#8=s`asJUdx+r^Kfy8O(WU&agjo=T@N0g9yqw(Ov4}V z-uDi#x>Zg)72=T?{r2D!{?V^xDR~dxNR(HHJmvH)v338P-LB_|y)BrY6y;z#g%go- zG$McAt?MXW&0F;J79l=pzK46b*mG2fQ%^wT?J#=Xx3Q2?*zA>T&;9GT!wBJtZ>`M& z({j>Q=tuFBbWY3$DQGTicQt*_(ZKl1t-B114=yqe_CZOu9Kf$)8Hnht4Xb}c@1LoR zehz@&@?sGXVEC z4%Ax}LQ`C2QeS%{J%5hsNfVHsNsjGZVssfMp-P>im^Kq7++4C1$d3#3$cKufF4yW; z9Jyo2sTv~73MuxW5QyuJD;3qxy@j5PTQgO{biF+EIL4ZYCY zOseTT4fmiYvhG0F*E1O>-Wwau#a>8!*y-Yl8%q%uc zGHG=!#3e5?&F_I~TH0MMIkAZoDjanSvFer59etm#vSfxJWALeS#{+wHg!R+L>|RZMGq z3Ugw4=eM!(k+Pd(1V^2Bk7(mTH+_SmH6^C_<~}8cCmGi5P{oH;sa>VX&<74Kb zHj&Fc!b5T}`!gCQFK~Zh^@($S+B2K85Yn{DDMfCs2i@Vf&7i#>w7GAYC?Abrat7RJ zAf~JzI0*sWLwLa}AJmv{zqqT|yV8VKjhOV5quWG5^&EWS=vc-=0!R9|m4M}GTr)|r z3i$01wLZCW9(uU(qO^;Tb~#=7OG<*(rz>}QXo}vVjSLyT3%j}5R;vVbblhOV^3YLm zsqtc?dY`I2OUy!I4uuT)iY$MNUZX;*^v`-mwx+7)^0w%tac@y@kerhiwf?R1!~fBk zMjpO*#BP}ZcI}FEno~~Ea?WZQ%_Hz(E-q^L^s$yr;nI986$5>ZpJ7-$s`a9@+%cUO zc~iaSR8P;kaZ4?XPPwOkro>!Mx??b=aNbh`<|do^BvG~UtF#jOb?qPr`S@s(-HjRA zPNG(`7KhpWXSNcRuBkq((xE3WKa!bbg|)%@-iO3BXy386@6beP`yBPn8Nqv}HZEh& zvRTS?R2wcT4N|u`pAwc;6ZxhXvZ3;95h*{w(&=0w)|*awmsp~66c4TOuy2kh!UhC_B5pQ3aYmNE`UWw6f8gE8>Nc&;Q4+ra zmM|ex2sNXp+O3sP=VEgfIe5q7R>daYgQTsb}H#Y;CAXTkHqJ(+t5&zz_oolMVfT^&C_0b>bo^Kiv)8v zs&1*a`#~JD_aD<-3THN5HX`R1DICRG{bQv#JFbLR0+13zOc3#t21?UOJ-&# z^Y1DwN?H(2WIS&k(*w`8IvU9C9>wjv__jastZn{+oMQe|B~_iXG0Xn zM(R)_@eb0uHxCSO(kMqY`&Z}Yed`%3y3T$7lcq2olU9S+aM)sc{Wt^n?4;$^r?Rs| z5%Qzd+8^)-MMHz`ceGTm&t)(SCRJ}T8&%C|Wp_;umrjmN(-tLa)Ba@ z&dx{o^zx|#b!!RSThnLFV{YO!uZ_+>_YlQpy>v_w-6vP)k$Y_*G>i9S5#*KT4V63L zD;YIU(Xywg0-9$H((Y>}QF0IRPTt3y?IDvr_;mWDj@Wp_eCB1{H%Z?%HSA=1my9w? 
ztT>0#MjkVr2-swECK-bYxct`PrT&S-%fV#!Iy^Q*&6^uWr?pC;b#$gSD-@XJP(Jf2 zKLPb>`1C8Cm-4=mP2tL?4dZfha+Hec9IxqVDijCOT@9(+=cSMUXaT0yo( z6FIbrg{Ypfm>Qu$^vg|jYL_H%<~}iWR`gcQO&DOgDx_?Ewc>oPF;$J*yIbSWmWtg= zL#qObc9rMQa98o<)s4*!dA2W(FKEOYZ@&0QHdKF1Q`E#U_+%!y{X!{stuT5hyzKnJ zF}rM;JG0R_x5dAodKIlW@@452JhZ7dJt)IEk32t8F+q%Nj=$AQRHC%(e5Fk5xx+V$ zj|#($??v^x}$ieh}@3M+RMNX-;HBv(j_Z{(#H?MX0@j=BXmDh<7Rvu%7W zEKtA5F5LIsMmYLmQNWB8ZeCz+nbj|Cl!SRKZ1wXgwO8p$X-10A{cD!h{nmZYg}Ho; zsqGn7nC6U#DB5>IVc0q9yvl5@osC{W~mE)4aUv_ALRP z#P%|k{VOkM!{o{HMuZYmjy-$}ymI2k-cO|r#mCgsR7|3?CAH^G%O5A@#l``tPII(Er;uLd4U{a*= zz}zAbWhm5GHdB2za$f8k*@pht@8S}JX=2)!M-Cg>cnwjKub+-ddb#fQ(MGr57BJDI zP$-XdJFIj6W8vH6Fm>_ORJy9oj+Ik9kHfw1lSX}p>ZL3C#EfAzi6VSn?@I7 zud|*yTy6NVL?A}L^ZKk_$#)iKk`o(ZF~4mQdsbxs)9C(}@ej)4h4+vuUHziXM|Ud5 zN??Bck`U$iR7*y084stKv`_8#&)cr!clgGRT*%|oCZ zkz{%}PBzOXMB!?c-J_1cBEMB1ulYV#C@J+8G zZ&|cDQeN-X6c3zOo8uDg#O-o9S8E=79k=2cD_KW_Xkpt=ENf-Tq4S^?Po?NF6V9;M zq;{)27d3h7cA;+36y{`Y{POV?v~KwGsYU@5SE8F9r@hB49h@yr_LD#%E1AHSKz72` zLCS&zmVw|%Y0e@c&BP8}zJ72IeI}F*L+QTk<2r<*YwhjtASMNAql`MdT|Jg!3okR! z7@^{&h`GWmkKPP*^e@-5;AnmAST7h#TC0oe8}=PYP39nf@09OrTJ~gOCF$(_f)$zP zfsdbL3$i%!MV@u+rO7a7veGg>2)HaY4)xDhtR2aCrbO3mm&G{E zdDaYi4;ATSvi%%lb@FmaVQcOMhJf(QH;qZo<-2RXq`WJO1wsywL{;+dHwn+Y8=dAq zr+GNn@7X<7*f{icdd3}VMWLo%sE=6c^l~#h;mdii+!som`2#6A3Ap=pF##{y@*R{U zFc|iiVGZA{Lf1YTD&U*>%;tWoggoZCmLU{L?akx@)JSTZf zXM~L{QPpH(BGpRC4JR$g(=Cg2IV@7*%%IC?>H>`~DM4xX>4yddEly0VK5G~mbXz2> z4us^~l_U``IV1^CTp)vF3csTYmhZNS*6F;nK|&cAPFh3)wVNqzR9kqYDgel_2iLXG zd`}prBh~%hh@PG?DL%7h(p6^JNJ7$bvUw6~tZ2~umEZU|YhnDn6o1iEfp|{sxJ*Fb zv>ES``fDS`LaN!tk_G<3+o6nLsx5ax@eqeBDJ4K>cO$G_rV#u02Od_Ba=J{ah zNs#+kP#in8x!1WwDJiMM8lDTOHMtKyoL2l!AGV}?WXb=*#h8WXacJ7~eMH~4wxW8X z>MF|0=Uc}*>9>@cx8y(G`fSV95;yTtLCH+B#Qk|qy}a4|AZ&SfR#O2XXsa}C3p!_F zpY53{vXVn0^PF1uZ7$jSDV6YiMpEV=(>E(4>yu{~D8{nbixlt3ujWu}te5-5t);dn zuJcSWa~QhOeC15=9P^~<3?~sFWosd4%Oxd79EOrxKRhT;CU=``Ey9`cKp)w`SgO`= z2DqB!YdTt0rq>i-BK1wlXVs3llM1U+$MU#!}OpY0xoIjzz8=X3{;{q$3N7)>SZ%se(A_0{H!$YDK95B6RVQ`2}=l)gmUA z)bY{!8eXbw1!878X7eR{JrX@49+WFIM%vilKB;id$89cmY=g>3`wo1FLAy}ACYw7o2b)uv zgZT5TAJl0s9c%MhsG;UzxELe;OlGuwjMA5-Ca9j(X+Q4F>f_3#fi>n(<;MM|$&V-H z2WfbB>Wd^;%wEFjO*2T1RIaErY!487V4&@ zRHYN6h*ycp%khRnp6R@;OeoSI;o8*D)bE$RTSxLE1V#lPU3e{GCiqGcCbFc?43rAa z2-I4`bt+DLDtu4hiV>~PsLn`WweUD&dA>)q6j5r2iLWZH${|(|?-0|Mt!DH%%g$Aw z%c64-Iyrf}O)C>dtNtpt#?&@LA|v>@;B$F4jo!RqGnNvq9$d*r52qdH0JXtA%zX0x z6!+==#Ig5>L~~h}*~nPoYzM?nm|wCk6dM$)VBODVA~I|fd4Bmk+4(+EUh7$l%U0HA zUrddyhHr(M1=&Xzjg_S3nit`%(fa4}J#Ia>lCV%N{&r5%ARu4(mH4#sGN+87jEPL1 z{Z0D^gY|Vu!Lh+tgKvC0LLW(~am2d7u)qPSZDG}IBV|)*_qp+)Q$ilwt-B^SY&?w} zbdDK@WckN@DNM>vx@eqaT-mPSSW$k)`?B|y1ux}ud>Vk+V)(F8nC*!Aai<6RTW3kJ z&X|WX^3KrPtPMTEqBzXkE`&x}UOM6|qH?r^>1E3^k&cqb)6sW|e501U$DPJagboVn z39Se*7@`erE=^|oXKs2F;hw^8!0&q*%Ld4exDR7C@tKd{?G!`N#%SUJ#3mC&8mhK%t+a!#Qr<{w)z2xfSsN-X~lMjg{gutJ!gJ$>ASyz(r_q^kJdly?ULKm!+S z3i{hkh3Ys#PcmrOoy zsgSU|J7miCQMywaDtB8hmYJ2Uhjm%#wL*=+np&)PEVS^LC0rU_1tUtf$&^WBQRkPg zuiPk)5sxA~K6d0bQyvpNQ{|1f){BnHd>AG>MujSEH>1AGH$2(;gl;Oc2y#9-r^uO@ zbRfYye)HinvJd8ltarJNbhVW%3oDz#>iBbp#3P@HP8c>1KDqGJ`02ot`M9k3fLI_o zXR^*mycS?lV)0;}WZqy-7LXK(7Q8Doo>q`em5xJFQ5-(U?n!?meB=7qjp!?P zj;$LUH8`-)G&eTSH8_8J-My5EF`jwNH=;a`Uan}W=npCQ;v{j>+Nm=6wn+`Hu&Fqo z@L+P&as9aEQ=8#Ze7VO>>+8d2$^qeF&L4BfA6?*@xSW4^HKrzJp)a_nx*yYj{?JpN zcKPpBDpifEo=^J|>1pY;=no}-OghIyuQb^0-uwD>VUJ}Oo!tJc;v_A;`(G13$X$^* zD{=Q`RbT%2yPA({mAHZjyo#e?`RfAXs22hDBUbm!25E=#zRZ2eOj=7yZU5LFVIgkp z7ck&|Hw?GDk=pH1e51YPs;Gj2eAa3I(G%^p1D{K$ih*WFjt@z*r+j>SpA0FJDsNn@ z2OMkXPJMN`WwBT*?r&p!@@{SLEZ4aGGrt!f=EDw#c^$X?-r0v67&-NLR(s0mLzW}? 
z3##@5igZpj6Tu%odEFZ7Uo5Mag0jMi0rL33_mb-i?;8q3fJL`AIj3*W9R7wFJyNPO zk$j%7f1_j-`5|_qCDW|`Rim()3E#OpPp@ueY<9;ieJ_4rJi&6EB{AJKy}0;IvA2rD z2KIZ|a-z+h`7*q?(I~PYynGUnS^ta7{}6?T=aD8hJ^0 z0R=~wKen&$VkFLBnK1*k(H8!NhwH6DQqJ-U{C=3 zhe}Dn;M+(J@SV^ff7?)r{|1iZ?|~41pi=%80I?IN^pp6XglO;K_(w7Rd+5Pn{@;gy zgBiO}l5OH&krc}O+xFasQb6S_K^%pYjH1l;IS8fLL91+2|B0FSC)7V8r?z1a|A79N zIC5qFogj210_`e|LZKyK(nvHIgf0WdiMzVENXVdFk@5;)Y?VAh=7)A5|DW;VfAC97 z!4L{EaCr%77pRPcG|WX#0)cXsl|Z4SWo2PVgew{f+u{F-d;sA^eu3fI5_SJpx+VjLjjDdD;kdm@rVR4 zU>|V7gXmKLiNJ#hSR@7yA|PF{=s*C4BO;JU5DW>p`+2(|@P1xcgdY(AM_AryPXG_< z34l0j5PmA90Qmahh-d_j{Qh4w<=Jpoq?s25;J zz;0`3q6Jv&n#vA^0gbp_2m9Rw&|p|Jfq((9yQDZYK-kR*-Z?;EvQR+F503*+qyeNK ze)|>#ffusiEx>uAy_*+&U_$e5P^9kBj4hKSU(O`u^I0Xa-Zm$RqbV1NE)*D9z zB*7mLTMXdA9R&#BYZNpJ@Irt94sQU3cEh95fDhJ>uxl$~0FK}X8VrNO14MT)@8=Z) z=?A`i@$!SfFv_!?b7J&q0#$EqeXNFjCYGO*(EpGB{$haZt@4gPcAPIx{dL{1_36!H`i*H z)oPd3Y7eW`PaeD7OiOnh-Wve@-wX6n7c2ol?D8UZvk`lEK@c_Aq5#BC5r`c#Ai(DN z!wld(1sfND-epGbVFqv09}bDxrNQjdVD`{pe$t>pGkEV7hubB`?ULj6kmLR!_{oLB z_=AXZ0Jm!dzgjD9~kf_5{(HVPUc=tr{S?hYA0I=DR@mBOB2?%e=xQv}|N z0QPwrIIN$SHvu9In1W7@L4sQ+fPKtPclW==eCugCD8aSWW#BSu>hf}0(r_6~xH?o; zURFa(N=g$3gTl4oDoV0anlcLVFf|zsxV((KG#DYSt_79SmeG(=kk!yoS5t8W^t@eh zAUJ8aqy4!;1O?1-I3k$7(=7uQm|(CQghF>KWor($MjHnh1P+0LT`++NV(Y!=Y2Ali zT$~w^&Mq#jDAYIL=4Q^!R=N?ST#=>wbb8Hc!z~k=Gqr7oTk{#4@Qvd&ylrU2w?mnd3B>XB5Ynvcr5*aUSKSf2%oKB`YEBmz1Mm5d@ zhJp!cQ$Q1#bfFtDcS>}jr%0IS7{^AohHRBIWFq6tQb}W^UT<|i#1%B>3UN#&oh4NR zb3Z(DuGQho2-!SSF>}Nj14AEAX?aQFizBnMKt@!weSwVe#jeq~ZBQ^BQj21Ytzi(T zv5&YGS1hM=@;N+ZLZuy77(Ce*AoTQz>JW!l&?A?JO{+dMo%k&Nlx}NM8twgzcStmC zBbqJ4_jR)~t_o$>P%sn7?u16{i=m9=CKu@AMUYTKNet!9J$~~D1p)MUVjv#vN>3{T RlYxVuwGkB5GSQ}|{a<3T_QU`H literal 0 HcmV?d00001 diff --git a/examples/l3fwd-vf/Makefile b/examples/l3fwd-vf/Makefile new file mode 100644 index 0000000000..11e2b7c99f --- /dev/null +++ b/examples/l3fwd-vf/Makefile @@ -0,0 +1,58 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = l3fwd-vf + +# all source are stored in SRCS-y +SRCS-y := main.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# workaround for a gcc bug with noreturn attribute +# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603 +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_main.o += -Wno-return-type +endif + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c new file mode 100644 index 0000000000..a7f4cce203 --- /dev/null +++ b/examples/l3fwd-vf/main.c @@ -0,0 +1,1079 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +#define APP_LOOKUP_EXACT_MATCH 0 +#define APP_LOOKUP_LPM 1 +#define DO_RFC_1812_CHECKS + +//#define APP_LOOKUP_METHOD APP_LOOKUP_EXACT_MATCH +#ifndef APP_LOOKUP_METHOD +#define APP_LOOKUP_METHOD APP_LOOKUP_LPM +#endif + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) +#include +#include +#include +#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) +#include +#else +#error "APP_LOOKUP_METHOD set to incorrect value" +#endif + +#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1 + +#define MAX_PORTS 32 + +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define NB_MBUF 8192 + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ +#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ +#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */ + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */ +#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ +#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */ + +#define MAX_PKT_BURST 32 +#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */ + +#define NB_SOCKETS 8 + +#define SOCKET0 0 + +/* Configure how many packets ahead to prefetch, when reading packets */ +#define PREFETCH_OFFSET 3 + +/* + * Configurable number of RX/TX ring descriptors + */ +#define RTE_TEST_RX_DESC_DEFAULT 128 +#define RTE_TEST_TX_DESC_DEFAULT 512 +static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; +static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; + +/* ethernet addresses of ports */ +static struct ether_addr ports_eth_addr[MAX_PORTS]; + +/* mask of enabled ports */ +static uint32_t enabled_port_mask = 0; +static int numa_on = 1; /**< NUMA is enabled by default. 
*/ + +struct mbuf_table { + uint16_t len; + struct rte_mbuf *m_table[MAX_PKT_BURST]; +}; + +struct lcore_rx_queue { + uint8_t port_id; + uint8_t queue_id; +} __rte_cache_aligned; + +#define MAX_RX_QUEUE_PER_LCORE 16 +#define MAX_TX_QUEUE_PER_PORT 1 +#define MAX_RX_QUEUE_PER_PORT 1 + +#define MAX_LCORE_PARAMS 1024 +struct lcore_params { + uint8_t port_id; + uint8_t queue_id; + uint8_t lcore_id; +} __rte_cache_aligned; + +static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS]; +static struct lcore_params lcore_params_array_default[] = { + {0, 0, 2}, + {0, 1, 2}, + {0, 2, 2}, + {1, 0, 2}, + {1, 1, 2}, + {1, 2, 2}, + {2, 0, 2}, + {3, 0, 3}, + {3, 1, 3}, +}; + +static struct lcore_params * lcore_params = lcore_params_array_default; +static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) / + sizeof(lcore_params_array_default[0]); + +static struct rte_eth_conf port_conf = { + .rxmode = { + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 1, /**< IP checksum offload enabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 1, /**< CRC stripped by hardware */ + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IPV4, + }, + }, + .txmode = { + }, +}; + +static const struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, + }, +}; + +static const struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +static struct rte_mempool * pktmbuf_pool[NB_SOCKETS]; + + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) +struct ipv4_5tuple { + uint32_t ip_dst; + uint32_t ip_src; + uint16_t port_dst; + uint16_t port_src; + uint8_t proto; +} __attribute__((__packed__)); + +struct l3fwd_route { + struct ipv4_5tuple key; + uint8_t if_out; +}; + +static struct l3fwd_route l3fwd_route_array[] = { + {{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0}, + {{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1}, + {{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2}, + {{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3}, +}; + +typedef struct rte_hash lookup_struct_t; +static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS]; + +#define L3FWD_HASH_ENTRIES 1024 +struct rte_hash_parameters l3fwd_hash_params = { + .name = "l3fwd_hash_0", + .entries = L3FWD_HASH_ENTRIES, + .bucket_entries = 4, + .key_len = sizeof(struct ipv4_5tuple), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = SOCKET0, +}; + +#define L3FWD_NUM_ROUTES \ + (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0])) + +static uint8_t l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned; +#endif + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) +struct l3fwd_route { + uint32_t ip; + uint8_t depth; + uint8_t if_out; +}; + +static struct l3fwd_route l3fwd_route_array[] = { + {IPv4(1,1,1,0), 24, 0}, + {IPv4(2,1,1,0), 24, 1}, + {IPv4(3,1,1,0), 24, 2}, + {IPv4(4,1,1,0), 24, 3}, + {IPv4(5,1,1,0), 24, 4}, + {IPv4(6,1,1,0), 24, 5}, + {IPv4(7,1,1,0), 24, 6}, + {IPv4(8,1,1,0), 24, 7}, +}; + +#define L3FWD_NUM_ROUTES \ + (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0])) + +#define L3FWD_LPM_MAX_RULES 1024 + +typedef struct rte_lpm lookup_struct_t; +static lookup_struct_t 
*l3fwd_lookup_struct[NB_SOCKETS]; +#endif + +struct lcore_conf { + uint16_t n_rx_queue; + struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + uint16_t tx_queue_id; + struct mbuf_table tx_mbufs[MAX_PORTS]; + lookup_struct_t * lookup_struct; +} __rte_cache_aligned; + +static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + +/* Send burst of packets on an output interface */ +static inline int +send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port) +{ + struct rte_mbuf **m_table; + int ret; + uint16_t queueid; + + queueid = qconf->tx_queue_id; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + ret = rte_eth_tx_burst(port, queueid, m_table, n); + if (unlikely(ret < n)) { + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < n); + } + + return 0; +} + +/* Enqueue a single packet, and send burst if queue is filled */ +static inline int +send_single_packet(struct rte_mbuf *m, uint8_t port) +{ + uint32_t lcore_id; + uint16_t len; + struct lcore_conf *qconf; + + lcore_id = rte_lcore_id(); + + qconf = &lcore_conf[lcore_id]; + len = qconf->tx_mbufs[port].len; + qconf->tx_mbufs[port].m_table[len] = m; + len++; + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + send_burst(qconf, MAX_PKT_BURST, port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; + return 0; +} + +#ifdef DO_RFC_1812_CHECKS +static inline int +is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len) +{ + /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */ + /* + * 1. The packet length reported by the Link Layer must be large + * enough to hold the minimum length legal IP datagram (20 bytes). + */ + if (link_len < sizeof(struct ipv4_hdr)) + return -1; + + /* 2. The IP checksum must be correct. */ + /* this is checked in H/W */ + + /* + * 3. The IP version number must be 4. If the version number is not 4 + * then the packet may be another version of IP, such as IPng or + * ST-II. + */ + if (((pkt->version_ihl) >> 4) != 4) + return -3; + /* + * 4. The IP header length field must be large enough to hold the + * minimum length legal IP datagram (20 bytes = 5 words). + */ + if ((pkt->version_ihl & 0xf) < 5) + return -4; + + /* + * 5. The IP total length field must be large enough to hold the IP + * datagram header, whose length is specified in the IP header length + * field. 
+ */ + if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr)) + return -5; + + return 0; +} +#endif + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) +static void +print_key(struct ipv4_5tuple key) +{ + printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, proto = %d\n", + (unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto); +} + +static inline uint8_t +get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct) +{ + struct ipv4_5tuple key; + struct tcp_hdr *tcp; + struct udp_hdr *udp; + int ret = 0; + + key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr); + key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr); + key.proto = ipv4_hdr->next_proto_id; + + switch (ipv4_hdr->next_proto_id) { + case IPPROTO_TCP: + tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr + + sizeof(struct ipv4_hdr)); + key.port_dst = rte_be_to_cpu_16(tcp->dst_port); + key.port_src = rte_be_to_cpu_16(tcp->src_port); + break; + + case IPPROTO_UDP: + udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr + + sizeof(struct ipv4_hdr)); + key.port_dst = rte_be_to_cpu_16(udp->dst_port); + key.port_src = rte_be_to_cpu_16(udp->src_port); + break; + + default: + key.port_dst = 0; + key.port_src = 0; + } + + /* Find destination port */ + ret = rte_hash_lookup(l3fwd_lookup_struct, (const void *)&key); + return (uint8_t)((ret < 0)? portid : l3fwd_out_if[ret]); +} +#endif + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) +static inline uint8_t +get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct) +{ + uint8_t next_hop; + + return (uint8_t) ((rte_lpm_lookup(l3fwd_lookup_struct, + rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)? + next_hop : portid); +} +#endif + +static inline void +l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct) +{ + struct ether_hdr *eth_hdr; + struct ipv4_hdr *ipv4_hdr; + void *tmp; + uint8_t dst_port; + + eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); + + ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) + + sizeof(struct ether_hdr)); + +#ifdef DO_RFC_1812_CHECKS + /* Check to make sure the packet is valid (RFC1812) */ + if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) { + rte_pktmbuf_free(m); + return; + } +#endif + + dst_port = get_dst_port(ipv4_hdr, portid, l3fwd_lookup_struct); + if (dst_port >= MAX_PORTS || (enabled_port_mask & 1 << dst_port) == 0) + dst_port = portid; + + /* 00:09:c0:00:00:xx */ + tmp = &eth_hdr->d_addr.addr_bytes[0]; + *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24); + +#ifdef DO_RFC_1812_CHECKS + /* Update time to live and header checksum */ + --(ipv4_hdr->time_to_live); + ++(ipv4_hdr->hdr_checksum); +#endif + + /* src addr */ + ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr); + + send_single_packet(m, dst_port); + +} + +/* main processing loop */ +static __attribute__((noreturn)) int +main_loop(__attribute__((unused)) void *dummy) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + unsigned lcore_id; + uint64_t prev_tsc = 0; + uint64_t diff_tsc, cur_tsc; + int i, j, nb_rx; + uint8_t portid, queueid; + struct lcore_conf *qconf; + + lcore_id = rte_lcore_id(); + qconf = &lcore_conf[lcore_id]; + + if (qconf->n_rx_queue == 0) { + RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id); + while(1); + } + + RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i].port_id; + 
queueid = qconf->rx_queue_list[i].queue_id; + RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id, + portid, queueid); + } + + while (1) { + + cur_tsc = rte_rdtsc(); + + /* + * TX burst queue drain + */ + diff_tsc = cur_tsc - prev_tsc; + if (unlikely(diff_tsc > BURST_TX_DRAIN)) { + + /* + * This could be optimized (use queueid instead of + * portid), but it is not called so often + */ + for (portid = 0; portid < MAX_PORTS; portid++) { + if (qconf->tx_mbufs[portid].len == 0) + continue; + send_burst(&lcore_conf[lcore_id], + qconf->tx_mbufs[portid].len, + portid); + qconf->tx_mbufs[portid].len = 0; + } + + prev_tsc = cur_tsc; + } + + /* + * Read packet from RX queues + */ + for (i = 0; i < qconf->n_rx_queue; ++i) { + + portid = qconf->rx_queue_list[i].port_id; + queueid = qconf->rx_queue_list[i].queue_id; + nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST); + + /* Prefetch first packets */ + for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) { + rte_prefetch0(rte_pktmbuf_mtod( + pkts_burst[j], void *)); + } + + /* Prefetch and forward already prefetched packets */ + for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) { + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[ + j + PREFETCH_OFFSET], void *)); + l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct); + } + + /* Forward remaining prefetched packets */ + for (; j < nb_rx; j++) { + l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct); + } + } + } +} + +static int +check_lcore_params(void) +{ + uint8_t queue, lcore; + uint16_t i; + int socketid; + + for (i = 0; i < nb_lcore_params; ++i) { + queue = lcore_params[i].queue_id; + if (queue >= MAX_RX_QUEUE_PER_PORT) { + printf("invalid queue number: %hhu\n", queue); + return -1; + } + lcore = lcore_params[i].lcore_id; + if (!rte_lcore_is_enabled(lcore)) { + printf("error: lcore %hhu is not enabled in lcore mask\n", lcore); + return -1; + } + if ((socketid = rte_lcore_to_socket_id(lcore) != 0) && + (numa_on == 0)) { + printf("warning: lcore %hhu is on socket %d with numa off \n", + lcore, socketid); + } + } + return 0; +} + +static int +check_port_config(const unsigned nb_ports) +{ + unsigned portid; + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + portid = lcore_params[i].port_id; + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("port %u is not enabled in port mask\n", portid); + return -1; + } + if (portid >= nb_ports) { + printf("port %u is not present on the board\n", portid); + return -1; + } + } + return 0; +} + +static uint8_t +get_port_n_rx_queues(const uint8_t port) +{ + int queue = -1; + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue) + queue = lcore_params[i].queue_id; + } + return (uint8_t)(++queue); +} + +static int +init_lcore_rx_queues(void) +{ + uint16_t i, nb_rx_queue; + uint8_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + nb_rx_queue = lcore_conf[lcore].n_rx_queue; + if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { + printf("error: too many queues (%u) for lcore: %u\n", + (unsigned)nb_rx_queue + 1, (unsigned)lcore); + return -1; + } else { + lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = + lcore_params[i].port_id; + lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id = + lcore_params[i].queue_id; + lcore_conf[lcore].n_rx_queue++; + } + } + return 0; +} + +/* display usage */ +static void +print_usage(const char *prgname) +{ + printf ("%s [EAL options] -- -p PORTMASK" 
+ " [--config (port,queue,lcore)[,(port,queue,lcore]]\n" + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " --config (port,queue,lcore): rx queues configuration\n" + " --no-numa: optional, disable numa awareness\n", + prgname); +} + +static int +parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + if (pm == 0) + return -1; + + return pm; +} + +static int +parse_config(const char *q_arg) +{ + char s[256]; + const char *p, *p0 = q_arg; + char *end; + enum fieldnames { + FLD_PORT = 0, + FLD_QUEUE, + FLD_LCORE, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + int i; + unsigned size; + + nb_lcore_params = 0; + + while ((p = strchr(p0,'(')) != NULL) { + ++p; + if((p0 = strchr(p,')')) == NULL) + return -1; + + size = p0 - p; + if(size >= sizeof(s)) + return -1; + + rte_snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++){ + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) + return -1; + } + if (nb_lcore_params >= MAX_LCORE_PARAMS) { + printf("exceeded max number of lcore params: %hu\n", + nb_lcore_params); + return -1; + } + lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = lcore_params_array; + return 0; +} + +/* Parse the argument given in the command line of the application */ +static int +parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + char *prgname = argv[0]; + static struct option lgopts[] = { + {"config", 1, 0, 0}, + {"no-numa", 0, 0, 0}, + {NULL, 0, 0, 0} + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* portmask */ + case 'p': + enabled_port_mask = parse_portmask(optarg); + if (enabled_port_mask == 0) { + printf("invalid portmask\n"); + print_usage(prgname); + return -1; + } + break; + + /* long options */ + case 0: + if (!strcmp(lgopts[option_index].name, "config")) { + ret = parse_config(optarg); + if (ret) { + printf("invalid config\n"); + print_usage(prgname); + return -1; + } + } + + if (!strcmp(lgopts[option_index].name, "no-numa")) { + printf("numa is disabled \n"); + numa_on = 0; + } + break; + + default: + print_usage(prgname); + return -1; + } + } + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + return ret; +} + +static void +print_ethaddr(const char *name, const struct ether_addr *eth_addr) +{ + printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name, + eth_addr->addr_bytes[0], + eth_addr->addr_bytes[1], + eth_addr->addr_bytes[2], + eth_addr->addr_bytes[3], + eth_addr->addr_bytes[4], + eth_addr->addr_bytes[5]); +} + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) +static void +setup_hash(int socketid) +{ + unsigned i; + int ret; + char s[64]; + + /* create hashes */ + rte_snprintf(s, sizeof(s), "l3fwd_hash_%d", socketid); + l3fwd_hash_params.name = s; + l3fwd_hash_params.socket_id = socketid; + l3fwd_lookup_struct[socketid] = rte_hash_create(&l3fwd_hash_params); + if 
(l3fwd_lookup_struct[socketid] == NULL) + rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on " + "socket %d\n", socketid); + + /* populate the hash */ + for (i = 0; i < L3FWD_NUM_ROUTES; i++) { + ret = rte_hash_add_key (l3fwd_lookup_struct[socketid], + (void *) &l3fwd_route_array[i].key); + if (ret < 0) { + rte_exit(EXIT_FAILURE, "Unable to add entry %u to the" + "l3fwd hash on socket %d\n", i, socketid); + } + l3fwd_out_if[ret] = l3fwd_route_array[i].if_out; + printf("Hash: Adding key\n"); + print_key(l3fwd_route_array[i].key); + } +} +#endif + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) +static void +setup_lpm(int socketid) +{ + unsigned i; + int ret; + char s[64]; + + /* create the LPM table */ + rte_snprintf(s, sizeof(s), "L3FWD_LPM_%d", socketid); + l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid, + L3FWD_LPM_MAX_RULES, RTE_LPM_MEMZONE); + if (l3fwd_lookup_struct[socketid] == NULL) + rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table" + " on socket %d\n", socketid); + + /* populate the LPM table */ + for (i = 0; i < L3FWD_NUM_ROUTES; i++) { + ret = rte_lpm_add(l3fwd_lookup_struct[socketid], + l3fwd_route_array[i].ip, + l3fwd_route_array[i].depth, + l3fwd_route_array[i].if_out); + + if (ret < 0) { + rte_exit(EXIT_FAILURE, "Unable to add entry %u to the " + "l3fwd LPM table on socket %d\n", + i, socketid); + } + + printf("LPM: Adding route 0x%08x / %d (%d)\n", + (unsigned)l3fwd_route_array[i].ip, + l3fwd_route_array[i].depth, + l3fwd_route_array[i].if_out); + } +} +#endif + +static int +init_mem(void) +{ + struct lcore_conf *qconf; + int socketid; + unsigned lcore_id; + char s[64]; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + + if (numa_on) + socketid = rte_lcore_to_socket_id(lcore_id); + else + socketid = 0; + + if (socketid >= NB_SOCKETS) { + rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n", + socketid, lcore_id, NB_SOCKETS); + } + if (pktmbuf_pool[socketid] == NULL) { + rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); + pktmbuf_pool[socketid] = + rte_mempool_create(s, NB_MBUF, MBUF_SIZE, 32, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + socketid, 0); + if (pktmbuf_pool[socketid] == NULL) + rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n", socketid); + else + printf("Allocated mbuf pool on socket %d\n", socketid); + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) + setup_lpm(socketid); +#else + setup_hash(socketid); +#endif + } + qconf = &lcore_conf[lcore_id]; + qconf->lookup_struct = l3fwd_lookup_struct[socketid]; + } + return 0; +} + +int +MAIN(int argc, char **argv) +{ + struct lcore_conf *qconf; + int ret; + unsigned nb_ports; + uint16_t queueid; + unsigned lcore_id; + uint8_t portid, nb_rx_queue, queue, socketid; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); + argc -= ret; + argv += ret; + + /* parse application arguments (after the EAL ones) */ + ret = parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid L3FWD-VF parameters\n"); + + if (check_lcore_params() < 0) + rte_exit(EXIT_FAILURE, "check_lcore_params failed\n"); + + ret = init_lcore_rx_queues(); + if (ret < 0) + rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n"); + + ret = init_mem(); + if (ret < 0) + rte_exit(EXIT_FAILURE, "init_mem failed\n"); + + /* init driver */ +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) + 
rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n"); + + if (rte_ixgbevf_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init ixgbevf pmd\n"); +#endif + + if (rte_eal_pci_probe() < 0) + rte_exit(EXIT_FAILURE, "Cannot probe PCI\n"); + + nb_ports = rte_eth_dev_count(); + if (nb_ports > MAX_PORTS) + nb_ports = MAX_PORTS; + + if (check_port_config(nb_ports) < 0) + rte_exit(EXIT_FAILURE, "check_port_config failed\n"); + + /* initialize all ports */ + for (portid = 0; portid < nb_ports; portid++) { + /* skip ports that are not enabled */ + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("\nSkipping disabled port %d\n", portid); + continue; + } + + /* init port */ + printf("Initializing port %d ... ", portid ); + fflush(stdout); + + /* must always equal(=1) */ + nb_rx_queue = get_port_n_rx_queues(portid); + + printf("Creating queues: nb_rxq=%d nb_txq=%u... ", + nb_rx_queue, (unsigned)1 ); + ret = rte_eth_dev_configure(portid, nb_rx_queue, 1, &port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", + ret, portid); + + rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); + print_ethaddr(" Address:", &ports_eth_addr[portid]); + printf(", "); + + /* init one TX queue */ + socketid = 0; + + printf("txq=%d,%d ", 0, socketid); + fflush(stdout); + ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, + socketid, &tx_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " + "port=%d\n", ret, portid); + + printf("\n"); + } + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + qconf = &lcore_conf[lcore_id]; + qconf->tx_queue_id = 0; + + printf("\nInitializing rx queues on lcore %u ... ", lcore_id ); + fflush(stdout); + /* init RX queues */ + for(queue = 0; queue < qconf->n_rx_queue; ++queue) { + portid = qconf->rx_queue_list[queue].port_id; + queueid = qconf->rx_queue_list[queue].queue_id; + + if (numa_on) + socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); + else + socketid = 0; + + printf("rxq=%d,%d,%d ", portid, queueid, socketid); + fflush(stdout); + + ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, + socketid, &rx_conf, pktmbuf_pool[socketid]); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," + "port=%d\n", ret, portid); + } + } + printf("\n"); + + /* start ports */ + for (portid = 0; portid < nb_ports; portid++) { + if ((enabled_port_mask & (1 << portid)) == 0) { + continue; + } + /* Start device */ + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n", + ret, portid); + + printf("done: Port %d\n", portid); + + } + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/examples/l3fwd-vf/main.h b/examples/l3fwd-vf/main.h new file mode 100644 index 0000000000..6027cb5455 --- /dev/null +++ b/examples/l3fwd-vf/main.h @@ -0,0 +1,47 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/l3fwd/482251_L3Forwarding_Sample_App_Guide_Rev1.2.pdf b/examples/l3fwd/482251_L3Forwarding_Sample_App_Guide_Rev1.2.pdf new file mode 100644 index 0000000000000000000000000000000000000000..304b436c625d64c53bd60370e0741d78e48e5284 GIT binary patch literal 61714 [base85-encoded PDF data omitted]
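A minimal standalone sketch of the per-packet routing decision made by the l3fwd-vf fast path above (LPM build): one rte_lpm_lookup() on the destination address, with a fall-back to the receive port when no route matches or the matched port is outside the enabled port mask. It assumes the DPDK 1.2.x rte_lpm API exactly as it is used in the diff; the helper name select_out_port and its port_mask parameter are illustrative only, not part of the patch.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_lpm.h>

#define MAX_PORTS 32	/* same bound as the example above */

/*
 * Pick an output port for an IPv4 destination address given in network
 * byte order. Mirrors the policy of get_dst_port() plus the port-mask
 * check in l3fwd_simple_forward(): unknown or disabled destinations are
 * sent back out of the receive port.
 */
uint8_t
select_out_port(struct rte_lpm *lpm, uint32_t dst_addr_be,
		uint8_t rx_port, uint32_t port_mask)
{
	uint8_t next_hop;

	if (rte_lpm_lookup(lpm, rte_be_to_cpu_32(dst_addr_be), &next_hop) != 0)
		return rx_port;		/* no route found */
	if (next_hop >= MAX_PORTS || (port_mask & (1u << next_hop)) == 0)
		return rx_port;		/* route points at a disabled port */
	return next_hop;
}

In the example itself the equivalent steps are dst_port = get_dst_port(ipv4_hdr, portid, l3fwd_lookup_struct); followed by the MAX_PORTS/enabled_port_mask test in l3fwd_simple_forward().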
diff --git a/examples/l3fwd/Makefile b/examples/l3fwd/Makefile new file mode 100644 index 0000000000..de462784c4 --- /dev/null +++ b/examples/l3fwd/Makefile @@ -0,0 +1,58 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = l3fwd + +# all source are stored in SRCS-y +SRCS-y := main.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# workaround for a gcc bug with noreturn attribute +# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603 +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_main.o += -Wno-return-type +endif + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c new file mode 100644 index 0000000000..3b6e5b6035 --- /dev/null +++ b/examples/l3fwd/main.c @@ -0,0 +1,1118 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <inttypes.h> +#include <sys/types.h> +#include <string.h> +#include <sys/queue.h> +#include <stdarg.h> +#include <errno.h> +#include <getopt.h> +
+#include <rte_common.h> +#include <rte_byteorder.h> +#include <rte_log.h> +#include <rte_memory.h> +#include <rte_memcpy.h> +#include <rte_memzone.h> +#include <rte_tailq.h> +#include <rte_eal.h> +#include <rte_per_lcore.h> +#include <rte_launch.h> +#include <rte_atomic.h> +#include <rte_cycles.h> +#include <rte_prefetch.h> +#include <rte_lcore.h> +#include <rte_per_lcore.h> +#include <rte_branch_prediction.h> +#include <rte_interrupts.h> +#include <rte_pci.h> +#include <rte_random.h> +#include <rte_debug.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_ring.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_ip.h> +#include <rte_tcp.h> +#include <rte_udp.h> +#include <rte_string_fns.h> +
+#include "main.h" + +#define APP_LOOKUP_EXACT_MATCH 0 +#define APP_LOOKUP_LPM 1 +#define DO_RFC_1812_CHECKS + +//#define APP_LOOKUP_METHOD APP_LOOKUP_EXACT_MATCH +#ifndef APP_LOOKUP_METHOD +#define APP_LOOKUP_METHOD APP_LOOKUP_LPM +#endif +
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) +#include <rte_hash.h> +#include <rte_hash_crc.h> +#include <rte_jhash.h> +#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) +#include <rte_lpm.h> +#else +#error "APP_LOOKUP_METHOD set to incorrect value" +#endif +
+#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1 + +#define MAX_PORTS 32 + +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define NB_MBUF 8192 +
+/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ +#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ +#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */ +
+/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */ +#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ +#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */ +
+#define MAX_PKT_BURST 32 +#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 GHz */ + +#define NB_SOCKETS 8 + +#define SOCKET0 0 + +/* Configure how many packets ahead to prefetch, when reading packets */ +#define PREFETCH_OFFSET 3 +
+/* + * Configurable number of RX/TX ring descriptors + */ +#define RTE_TEST_RX_DESC_DEFAULT 128 +#define RTE_TEST_TX_DESC_DEFAULT 512 +static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; +static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; +
+/* ethernet addresses of ports */ +static struct ether_addr ports_eth_addr[MAX_PORTS]; + +/* mask of enabled ports */ +static uint32_t enabled_port_mask = 0; +static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */ +static int numa_on = 1; /**< NUMA is enabled by default.
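Use the --no-numa option to disable NUMA awareness and place all mbuf pools and lookup tables on socket 0.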
*/ + +struct mbuf_table { + uint16_t len; + struct rte_mbuf *m_table[MAX_PKT_BURST]; +}; + +struct lcore_rx_queue { + uint8_t port_id; + uint8_t queue_id; +} __rte_cache_aligned; + +#define MAX_RX_QUEUE_PER_LCORE 16 +#define MAX_TX_QUEUE_PER_PORT MAX_PORTS +#define MAX_RX_QUEUE_PER_PORT 128 + +#define MAX_LCORE_PARAMS 1024 +struct lcore_params { + uint8_t port_id; + uint8_t queue_id; + uint8_t lcore_id; +} __rte_cache_aligned; + +static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS]; +static struct lcore_params lcore_params_array_default[] = { + {0, 0, 2}, + {0, 1, 2}, + {0, 2, 2}, + {1, 0, 2}, + {1, 1, 2}, + {1, 2, 2}, + {2, 0, 2}, + {3, 0, 3}, + {3, 1, 3}, +}; + +static struct lcore_params * lcore_params = lcore_params_array_default; +static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) / + sizeof(lcore_params_array_default[0]); + +static struct rte_eth_conf port_conf = { + .rxmode = { + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 1, /**< IP checksum offload enabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 0, /**< CRC stripped by hardware */ + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IPV4, + }, + }, + .txmode = { + }, +}; + +static const struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, + }, +}; + +static const struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +static struct rte_mempool * pktmbuf_pool[NB_SOCKETS]; + + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) +struct ipv4_5tuple { + uint32_t ip_dst; + uint32_t ip_src; + uint16_t port_dst; + uint16_t port_src; + uint8_t proto; +} __attribute__((__packed__)); + +struct l3fwd_route { + struct ipv4_5tuple key; + uint8_t if_out; +}; + +static struct l3fwd_route l3fwd_route_array[] = { + {{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0}, + {{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1}, + {{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2}, + {{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3}, +}; + +typedef struct rte_hash lookup_struct_t; +static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS]; + +#define L3FWD_HASH_ENTRIES 1024 +struct rte_hash_parameters l3fwd_hash_params = { + .name = "l3fwd_hash_0", + .entries = L3FWD_HASH_ENTRIES, + .bucket_entries = 4, + .key_len = sizeof(struct ipv4_5tuple), + .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = SOCKET0, +}; + +#define L3FWD_NUM_ROUTES \ + (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0])) + +static uint8_t l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned; +#endif + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) +struct l3fwd_route { + uint32_t ip; + uint8_t depth; + uint8_t if_out; +}; + +static struct l3fwd_route l3fwd_route_array[] = { + {IPv4(1,1,1,0), 24, 0}, + {IPv4(2,1,1,0), 24, 1}, + {IPv4(3,1,1,0), 24, 2}, + {IPv4(4,1,1,0), 24, 3}, + {IPv4(5,1,1,0), 24, 4}, + {IPv4(6,1,1,0), 24, 5}, + {IPv4(7,1,1,0), 24, 6}, + {IPv4(8,1,1,0), 24, 7}, +}; + +#define L3FWD_NUM_ROUTES \ + (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0])) + +#define L3FWD_LPM_MAX_RULES 1024 + +typedef struct rte_lpm lookup_struct_t; +static lookup_struct_t 
*l3fwd_lookup_struct[NB_SOCKETS]; +#endif + +struct lcore_conf { + uint16_t n_rx_queue; + struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + uint16_t tx_queue_id[MAX_PORTS]; + struct mbuf_table tx_mbufs[MAX_PORTS]; + lookup_struct_t * lookup_struct; +} __rte_cache_aligned; + +static struct lcore_conf lcore_conf[RTE_MAX_LCORE]; + +/* Send burst of packets on an output interface */ +static inline int +send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port) +{ + struct rte_mbuf **m_table; + int ret; + uint16_t queueid; + + queueid = qconf->tx_queue_id[port]; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + ret = rte_eth_tx_burst(port, queueid, m_table, n); + if (unlikely(ret < n)) { + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < n); + } + + return 0; +} + +/* Enqueue a single packet, and send burst if queue is filled */ +static inline int +send_single_packet(struct rte_mbuf *m, uint8_t port) +{ + uint32_t lcore_id; + uint16_t len; + struct lcore_conf *qconf; + + lcore_id = rte_lcore_id(); + + qconf = &lcore_conf[lcore_id]; + len = qconf->tx_mbufs[port].len; + qconf->tx_mbufs[port].m_table[len] = m; + len++; + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + send_burst(qconf, MAX_PKT_BURST, port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; + return 0; +} + +#ifdef DO_RFC_1812_CHECKS +static inline int +is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len) +{ + /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */ + /* + * 1. The packet length reported by the Link Layer must be large + * enough to hold the minimum length legal IP datagram (20 bytes). + */ + if (link_len < sizeof(struct ipv4_hdr)) + return -1; + + /* 2. The IP checksum must be correct. */ + /* this is checked in H/W */ + + /* + * 3. The IP version number must be 4. If the version number is not 4 + * then the packet may be another version of IP, such as IPng or + * ST-II. + */ + if (((pkt->version_ihl) >> 4) != 4) + return -3; + /* + * 4. The IP header length field must be large enough to hold the + * minimum length legal IP datagram (20 bytes = 5 words). + */ + if ((pkt->version_ihl & 0xf) < 5) + return -4; + + /* + * 5. The IP total length field must be large enough to hold the IP + * datagram header, whose length is specified in the IP header length + * field. 
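+ * Note that the check below compares the total length against the fixed
+ * 20-byte header size rather than against the header length given by the
+ * IHL field, so IPv4 options are not fully validated here.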
+ */ + if (rte_cpu_to_be_16(pkt->total_length) < sizeof(struct ipv4_hdr)) + return -5; + + return 0; +} +#endif +
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) +static void +print_key(struct ipv4_5tuple key) +{ + printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, proto = %d\n", + (unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto); +} +
+static inline uint8_t +get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct) +{ + struct ipv4_5tuple key; + struct tcp_hdr *tcp; + struct udp_hdr *udp; + int ret = 0; + + key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr); + key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr); + key.proto = ipv4_hdr->next_proto_id; + + switch (ipv4_hdr->next_proto_id) { + case IPPROTO_TCP: + tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr + + sizeof(struct ipv4_hdr)); + key.port_dst = rte_be_to_cpu_16(tcp->dst_port); + key.port_src = rte_be_to_cpu_16(tcp->src_port); + break; + + case IPPROTO_UDP: + udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr + + sizeof(struct ipv4_hdr)); + key.port_dst = rte_be_to_cpu_16(udp->dst_port); + key.port_src = rte_be_to_cpu_16(udp->src_port); + break; + + default: + key.port_dst = 0; + key.port_src = 0; + } + + /* Find destination port */ + ret = rte_hash_lookup(l3fwd_lookup_struct, (const void *)&key); + return (uint8_t)((ret < 0)? portid : l3fwd_out_if[ret]); +} +#endif +
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) +static inline uint8_t +get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct) +{ + uint8_t next_hop; + + return (uint8_t) ((rte_lpm_lookup(l3fwd_lookup_struct, + rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)? + next_hop : portid); +} +#endif +
+static inline void +l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct) +{ + struct ether_hdr *eth_hdr; + struct ipv4_hdr *ipv4_hdr; + void *tmp; + uint8_t dst_port; + + eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *); + + ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) + + sizeof(struct ether_hdr)); + +#ifdef DO_RFC_1812_CHECKS + /* Check to make sure the packet is valid (RFC1812) */ + if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) { + rte_pktmbuf_free(m); + return; + } +#endif + + dst_port = get_dst_port(ipv4_hdr, portid, l3fwd_lookup_struct); + if (dst_port >= MAX_PORTS || (enabled_port_mask & 1 << dst_port) == 0) + dst_port = portid; + + /* set destination MAC address: 00:09:c0:00:00:xx */ + tmp = &eth_hdr->d_addr.addr_bytes[0]; + *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24); + +#ifdef DO_RFC_1812_CHECKS + /* Update time to live and header checksum */ + --(ipv4_hdr->time_to_live); + /* the raw checksum halfword is incremented to offset the TTL decrement (assumes a little-endian CPU and ignores one's-complement carries) */ + ++(ipv4_hdr->hdr_checksum); +#endif + + /* src addr */ + ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr); + + send_single_packet(m, dst_port); + +} +
+/* main processing loop */ +static __attribute__((noreturn)) int +main_loop(__attribute__((unused)) void *dummy) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + unsigned lcore_id; + uint64_t prev_tsc = 0; + uint64_t diff_tsc, cur_tsc; + int i, j, nb_rx; + uint8_t portid, queueid; + struct lcore_conf *qconf; + + lcore_id = rte_lcore_id(); + qconf = &lcore_conf[lcore_id]; + + if (qconf->n_rx_queue == 0) { + RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id); + while(1); + } + + RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i].port_id; + 
queueid = qconf->rx_queue_list[i].queue_id; + RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id, + portid, queueid); + } + + while (1) { + + cur_tsc = rte_rdtsc(); + + /* + * TX burst queue drain + */ + diff_tsc = cur_tsc - prev_tsc; + if (unlikely(diff_tsc > BURST_TX_DRAIN)) { + + /* + * This could be optimized (use queueid instead of + * portid), but it is not called so often + */ + for (portid = 0; portid < MAX_PORTS; portid++) { + if (qconf->tx_mbufs[portid].len == 0) + continue; + send_burst(&lcore_conf[lcore_id], + qconf->tx_mbufs[portid].len, + portid); + qconf->tx_mbufs[portid].len = 0; + } + + prev_tsc = cur_tsc; + } +
+ /* + * Read packet from RX queues + */ + for (i = 0; i < qconf->n_rx_queue; ++i) { + + portid = qconf->rx_queue_list[i].port_id; + queueid = qconf->rx_queue_list[i].queue_id; + nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST); + + /* Prefetch first packets */ + for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) { + rte_prefetch0(rte_pktmbuf_mtod( + pkts_burst[j], void *)); + } + + /* Prefetch and forward already prefetched packets */ + for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) { + rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[ + j + PREFETCH_OFFSET], void *)); + l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct); + } + + /* Forward remaining prefetched packets */ + for (; j < nb_rx; j++) { + l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct); + } + } + } +} +
+static int +check_lcore_params(void) +{ + uint8_t queue, lcore; + uint16_t i; + int socketid; + + for (i = 0; i < nb_lcore_params; ++i) { + queue = lcore_params[i].queue_id; + if (queue >= MAX_RX_QUEUE_PER_PORT) { + printf("invalid queue number: %hhu\n", queue); + return -1; + } + lcore = lcore_params[i].lcore_id; + if (!rte_lcore_is_enabled(lcore)) { + printf("error: lcore %hhu is not enabled in lcore mask\n", lcore); + return -1; + } + if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) && + (numa_on == 0)) { + printf("warning: lcore %hhu is on socket %d with numa off \n", + lcore, socketid); + } + } + return 0; +} +
+static int +check_port_config(const unsigned nb_ports) +{ + unsigned portid; + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + portid = lcore_params[i].port_id; + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("port %u is not enabled in port mask\n", portid); + return -1; + } + if (portid >= nb_ports) { + printf("port %u is not present on the board\n", portid); + return -1; + } + } + return 0; +} +
+static uint8_t +get_port_n_rx_queues(const uint8_t port) +{ + int queue = -1; + uint16_t i; + + for (i = 0; i < nb_lcore_params; ++i) { + if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue) + queue = lcore_params[i].queue_id; + } + return (uint8_t)(++queue); +} +
+static int +init_lcore_rx_queues(void) +{ + uint16_t i, nb_rx_queue; + uint8_t lcore; + + for (i = 0; i < nb_lcore_params; ++i) { + lcore = lcore_params[i].lcore_id; + nb_rx_queue = lcore_conf[lcore].n_rx_queue; + if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) { + printf("error: too many queues (%u) for lcore: %u\n", + (unsigned)nb_rx_queue + 1, (unsigned)lcore); + return -1; + } else { + lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id = + lcore_params[i].port_id; + lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id = + lcore_params[i].queue_id; + lcore_conf[lcore].n_rx_queue++; + } + } + return 0; +} +
+/* display usage */ +static void +print_usage(const char *prgname) +{ + printf ("%s [EAL options] -- -p PORTMASK 
-P" + " [--config (port,queue,lcore)[,(port,queue,lcore]]\n" + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " --config (port,queue,lcore): rx queues configuration\n" + " --no-numa: optional, disable numa awareness\n", + prgname); +} + +static int +parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + if (pm == 0) + return -1; + + return pm; +} + +static int +parse_config(const char *q_arg) +{ + char s[256]; + const char *p, *p0 = q_arg; + char *end; + enum fieldnames { + FLD_PORT = 0, + FLD_QUEUE, + FLD_LCORE, + _NUM_FLD + }; + unsigned long int_fld[_NUM_FLD]; + char *str_fld[_NUM_FLD]; + int i; + unsigned size; + + nb_lcore_params = 0; + + while ((p = strchr(p0,'(')) != NULL) { + ++p; + if((p0 = strchr(p,')')) == NULL) + return -1; + + size = p0 - p; + if(size >= sizeof(s)) + return -1; + + rte_snprintf(s, sizeof(s), "%.*s", size, p); + if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD) + return -1; + for (i = 0; i < _NUM_FLD; i++){ + errno = 0; + int_fld[i] = strtoul(str_fld[i], &end, 0); + if (errno != 0 || end == str_fld[i] || int_fld[i] > 255) + return -1; + } + if (nb_lcore_params >= MAX_LCORE_PARAMS) { + printf("exceeded max number of lcore params: %hu\n", + nb_lcore_params); + return -1; + } + lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT]; + lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE]; + lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE]; + ++nb_lcore_params; + } + lcore_params = lcore_params_array; + return 0; +} + +/* Parse the argument given in the command line of the application */ +static int +parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + char *prgname = argv[0]; + static struct option lgopts[] = { + {"config", 1, 0, 0}, + {"no-numa", 0, 0, 0}, + {NULL, 0, 0, 0} + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:P", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* portmask */ + case 'p': + enabled_port_mask = parse_portmask(optarg); + if (enabled_port_mask == 0) { + printf("invalid portmask\n"); + print_usage(prgname); + return -1; + } + break; + case 'P': + printf("Promiscuous mode selected\n"); + promiscuous_on = 1; + break; + + /* long options */ + case 0: + if (!strcmp(lgopts[option_index].name, "config")) { + ret = parse_config(optarg); + if (ret) { + printf("invalid config\n"); + print_usage(prgname); + return -1; + } + } + + if (!strcmp(lgopts[option_index].name, "no-numa")) { + printf("numa is disabled \n"); + numa_on = 0; + } + break; + + default: + print_usage(prgname); + return -1; + } + } + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + return ret; +} + +static void +print_ethaddr(const char *name, const struct ether_addr *eth_addr) +{ + printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name, + eth_addr->addr_bytes[0], + eth_addr->addr_bytes[1], + eth_addr->addr_bytes[2], + eth_addr->addr_bytes[3], + eth_addr->addr_bytes[4], + eth_addr->addr_bytes[5]); +} + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) +static void +setup_hash(int socketid) +{ + unsigned i; + int ret; + char s[64]; + + /* create hashes */ + rte_snprintf(s, sizeof(s), "l3fwd_hash_%d", socketid); + l3fwd_hash_params.name = s; + l3fwd_hash_params.socket_id = socketid; + 
l3fwd_lookup_struct[socketid] = rte_hash_create(&l3fwd_hash_params); + if (l3fwd_lookup_struct[socketid] == NULL) + rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on " + "socket %d\n", socketid); + + /* populate the hash */ + for (i = 0; i < L3FWD_NUM_ROUTES; i++) { + ret = rte_hash_add_key (l3fwd_lookup_struct[socketid], + (void *) &l3fwd_route_array[i].key); + if (ret < 0) { + rte_exit(EXIT_FAILURE, "Unable to add entry %u to the" + "l3fwd hash on socket %d\n", i, socketid); + } + l3fwd_out_if[ret] = l3fwd_route_array[i].if_out; + printf("Hash: Adding key\n"); + print_key(l3fwd_route_array[i].key); + } +} +#endif + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) +static void +setup_lpm(int socketid) +{ + unsigned i; + int ret; + char s[64]; + + /* create the LPM table */ + rte_snprintf(s, sizeof(s), "L3FWD_LPM_%d", socketid); + l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid, + L3FWD_LPM_MAX_RULES, RTE_LPM_MEMZONE); + if (l3fwd_lookup_struct[socketid] == NULL) + rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table" + " on socket %d\n", socketid); + + /* populate the LPM table */ + for (i = 0; i < L3FWD_NUM_ROUTES; i++) { + ret = rte_lpm_add(l3fwd_lookup_struct[socketid], + l3fwd_route_array[i].ip, + l3fwd_route_array[i].depth, + l3fwd_route_array[i].if_out); + + if (ret < 0) { + rte_exit(EXIT_FAILURE, "Unable to add entry %u to the " + "l3fwd LPM table on socket %d\n", + i, socketid); + } + + printf("LPM: Adding route 0x%08x / %d (%d)\n", + (unsigned)l3fwd_route_array[i].ip, + l3fwd_route_array[i].depth, + l3fwd_route_array[i].if_out); + } +} +#endif + +static int +init_mem(void) +{ + struct lcore_conf *qconf; + int socketid; + unsigned lcore_id; + char s[64]; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + + if (numa_on) + socketid = rte_lcore_to_socket_id(lcore_id); + else + socketid = 0; + + if (socketid >= NB_SOCKETS) { + rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n", + socketid, lcore_id, NB_SOCKETS); + } + if (pktmbuf_pool[socketid] == NULL) { + rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid); + pktmbuf_pool[socketid] = + rte_mempool_create(s, NB_MBUF, MBUF_SIZE, 32, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + socketid, 0); + if (pktmbuf_pool[socketid] == NULL) + rte_exit(EXIT_FAILURE, + "Cannot init mbuf pool on socket %d\n", socketid); + else + printf("Allocated mbuf pool on socket %d\n", socketid); + +#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM) + setup_lpm(socketid); +#else + setup_hash(socketid); +#endif + } + qconf = &lcore_conf[lcore_id]; + qconf->lookup_struct = l3fwd_lookup_struct[socketid]; + } + return 0; +} + +int +MAIN(int argc, char **argv) +{ + struct lcore_conf *qconf; + struct rte_eth_link link; + int ret; + unsigned nb_ports; + uint16_t queueid; + unsigned lcore_id; + uint32_t n_tx_queue, nb_lcores; + uint8_t portid, nb_rx_queue, queue, socketid; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n"); + argc -= ret; + argv += ret; + + /* parse application arguments (after the EAL ones) */ + ret = parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n"); + + if (check_lcore_params() < 0) + rte_exit(EXIT_FAILURE, "check_lcore_params failed\n"); + + ret = init_lcore_rx_queues(); + if (ret < 0) + rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n"); + + ret = init_mem(); + if (ret < 0) + 
rte_exit(EXIT_FAILURE, "init_mem failed\n"); + + /* init driver */ +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n"); +#endif + + if (rte_eal_pci_probe() < 0) + rte_exit(EXIT_FAILURE, "Cannot probe PCI\n"); + + nb_ports = rte_eth_dev_count(); + if (nb_ports > MAX_PORTS) + nb_ports = MAX_PORTS; + + if (check_port_config(nb_ports) < 0) + rte_exit(EXIT_FAILURE, "check_port_config failed\n"); + + nb_lcores = rte_lcore_count(); + + /* initialize all ports */ + for (portid = 0; portid < nb_ports; portid++) { + /* skip ports that are not enabled */ + if ((enabled_port_mask & (1 << portid)) == 0) { + printf("\nSkipping disabled port %d\n", portid); + continue; + } + + /* init port */ + printf("Initializing port %d ... ", portid ); + fflush(stdout); + + nb_rx_queue = get_port_n_rx_queues(portid); + n_tx_queue = nb_lcores; + if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) + n_tx_queue = MAX_TX_QUEUE_PER_PORT; + printf("Creating queues: nb_rxq=%d nb_txq=%u... ", + nb_rx_queue, (unsigned)n_tx_queue ); + ret = rte_eth_dev_configure(portid, nb_rx_queue, + (uint16_t)n_tx_queue, &port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", + ret, portid); + + rte_eth_macaddr_get(portid, &ports_eth_addr[portid]); + print_ethaddr(" Address:", &ports_eth_addr[portid]); + printf(", "); + + + /* init one TX queue per couple (lcore,port) */ + queueid = 0; + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + + if (numa_on) + socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); + else + socketid = 0; + + printf("txq=%u,%d,%d ", lcore_id, queueid, socketid); + fflush(stdout); + ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, + socketid, &tx_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " + "port=%d\n", ret, portid); + + qconf = &lcore_conf[lcore_id]; + qconf->tx_queue_id[portid] = queueid; + queueid++; + } + printf("\n"); + } + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (rte_lcore_is_enabled(lcore_id) == 0) + continue; + qconf = &lcore_conf[lcore_id]; + printf("\nInitializing rx queues on lcore %u ... ", lcore_id ); + fflush(stdout); + /* init RX queues */ + for(queue = 0; queue < qconf->n_rx_queue; ++queue) { + portid = qconf->rx_queue_list[queue].port_id; + queueid = qconf->rx_queue_list[queue].queue_id; + + if (numa_on) + socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id); + else + socketid = 0; + + printf("rxq=%d,%d,%d ", portid, queueid, socketid); + fflush(stdout); + + ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, + socketid, &rx_conf, pktmbuf_pool[socketid]); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," + "port=%d\n", ret, portid); + } + } + + printf("\n"); + + /* start ports */ + for (portid = 0; portid < nb_ports; portid++) { + if ((enabled_port_mask & (1 << portid)) == 0) { + continue; + } + /* Start device */ + ret = rte_eth_dev_start(portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n", + ret, portid); + + printf("done: Port %d ", portid); + + /* get link status */ + rte_eth_link_get(portid, &link); + if (link.link_status) { + printf(" Link Up - speed %u Mbps - %s\n", + (unsigned) link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
+ ("full-duplex") : ("half-duplex\n")); + } else { + printf(" Link Down\n"); + } + /* + * If enabled, put device in promiscuous mode. + * This allows IO forwarding mode to forward packets + * to itself through 2 cross-connected ports of the + * target machine. + */ + if (promiscuous_on) + rte_eth_promiscuous_enable(portid); + } + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/examples/l3fwd/main.h b/examples/l3fwd/main.h new file mode 100644 index 0000000000..6027cb5455 --- /dev/null +++ b/examples/l3fwd/main.h @@ -0,0 +1,47 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/link_status_interrupt/495672_Link_Status_Interrupt_Sample_App_Guide_Rev1.0.pdf b/examples/link_status_interrupt/495672_Link_Status_Interrupt_Sample_App_Guide_Rev1.0.pdf new file mode 100644 index 0000000000000000000000000000000000000000..db4681cd4541e27bcf4433fc43e00f579a14bd62 GIT binary patch literal 70985 zcmb5VQZjIvAv1u zpIc7OE(Dwm^fIP)<}MZl9ITA=as-Tw^eUbXru6bgR?056e0PNs&ouuv8|&1#!= z$czY|sQf`9n!EH$sui#x2^1TesT+OfX5ErmI}*_2jb=P}vm<=w z$fTr)GLj=9;02lWXvKjgfR>p{6u66ECFViYE)X&j#F@4tM>5ITnLZuI%&$*rk|DWRqCesS~>RRm5PRWACzK^9FHw!=PV{2 z`p)gd9Ti~B9%{nNDtLo~UoB6>HDq5=z}XkH1QUy8FI;ozvD{y%k5y!ii2BI3(%%xP z28Y79UMuD1s_SoD6U|IK^c<}uyeY@sn|1baWRZ_nag{|IqlJmhp-Nx;FtzJCAFD~K zAnwL{&@A)r%V=#fb5!%SM|+R%)vOznT|J%myj1hu4k2aOclM17j&|s*r1v+YF+jl4&m z=?LbMA;v%p>No&465}{!J(3Nkf0GefMvN&FNlfGHfcLluEKv{bT$QKPcbouDVR1wN zOn~(!woNrs9K6#E)bW3w!7%hx# z*Us^2t(AdFizBYZ(Sb|5gVTJ!j^DnIw;$&4WO_s)sXvbJO%!I)aSL#K!S)Mn?mQag z3F7vs`#WPs(5h4_qgc*HqkL@BlDO%KhQ z*8$AoB)WOr_{C6Yd!S>XAXKJ&z9clTuxBxf>I7CG(Eq4MB2Mh?ZgmsWN+e;g+ zJ3XDoC&~eShNuHg-vy<%>9N zO$)k(Sp;*5dv@3YGP~{U*)*h!9}>Qh>y`@WV1wtaqeR}KBIV~L>D*1UzQPEc+kj|L z93Mq7j}nT<15owMEPEuNiwz+PoXbPxQVgNxV+h5EnG7KPuDNMl(VG3Cq6GMQlJ5WU;yyULxK6XFgX7S45oj>U}9qUZ!l=$2CRk{5Jdb%$DRsPQAXUP z5lC#2p?E>0IWh5d#7RoyOj2OwzI&^ZhWcUdGjw$s23#hN3Jt1It0mFk9?e%4F&(`? zGfp$%0YAszJpzmR;K~3F1NdA$q;vCb?`&XfjJV#GAh1wqmwLywm_p-^p?*N`CpH_^4-=AqY{Kr-@_yNHHNXcdV|BgF0hJU~v zGv~j#V`pdj!yW5?N`Y?G3%ex_6hEMy0e-=*W9Mv+Oo{S^sks6jEsfGmqK1#(B-b() zMrEzQwDR#+FtoT7)Pz@5T(`F!V_!3pLgXPyA!#A$B$5>Sn)|CH5oEcBB>SYq^0|AD z*iLj>)V$6jNHk@`nSw^1BEO6$9h|nMadbM&rOC5HuEj4teQ&ss)##d?GWs&)%$xX6 zycl9xI22NAv0Fwpi%KWi*xyBstF!Dw2q}es`%u}M{@AfZjxIi;)r+i=9OOtFnhXUJ zLY>f%mho6wvNPU2WqApCru)?K!vB2va=-Xaa&!LB_w*lsCEU4RQd8ngYC>|J$DcIo zaSS={6$bByVmgCYH$f4DLzNDcYr)1X$4RaCGW8aztK zcZh<2J2;lM!m3w-MT$?@EZrX26N0YMY-B4rX)Hcv_ICb>mzBUf`Z?`sCiEI!uEkO# zBdIlf4HaZOKe3(Wdo8l5qTFIfTA;zTj*FBla>ByEUgN&m;L#>3Ga|#c@Z#$|v?HDF4J4UUOT|<^b%5~{aM^a5ZKXIP* zK@}HS?@x#59Pu`ZZ9(M(`kchtjXjO5nfPP^u1%xF1#XR}oJLSbN&K>P`M~bJZ`A-m zfJ4pe-+!wy#($tOMt1gpi;R_#<*x&ZL-^?X~NgL6^^%|RK5W=%!0=-@hh%neRRMc5($uCzJ(wJN4a5rt{@GE z>&RIVJ!toGYIuJq6oRAx5s6As!qEUVs4nGvvDhW%GZ=b!)p+r# zr0X)F5x zz>Czc)P7;IH%4Pw4=Al(o^qztTfr&&Zb#1f_>hj=9ZF+Z)em_pNW~ooCmrS4t@LK4 z%%S{Dl^|Tv>D2om9Wf%sm|r!3Q72^?`*UgQJKu3+PRcU!o$|u;fkpRzI;O0TMvq6s zJ$*aZI=#Buy?HvOBi=EU?P?xOhapuz=#@k16-g zhyUY!v#az%|GvTY8;SuyFd&%zZ*z?4pX3V&ZG z$=Ld}-upHcO(3a81{K#<3Sn#>0tKBCF{*Do>`>GOUiVq=8%=1SYG8Q--hAcy&D*i= z-cnZ-)yT-3#g?tdVFdz6%IjCgX;Tu(DQ*RmOfcE;Vjxmn;U69!Jn41P2lVzox$&`h zX{6~0Kbo*x%tr%OWiS;c83h9d8C3k;`H$nfM}Jz%*|uXP6rblALwvldy=lmmWbzrY zc4ki2Y}i)N(U*X6RdatIh6h@$*i}<~Ut4z#UtQOqaF#ON4fP%hccxN?*#9XL)X}t& z{+2qHe?T4UznNqF?Iu(rJyI8)QLwfhOpjiKZfnxfr`u&f6!yiyk0e{NIx<>X0 z>~8|grL`zYax|i|;Rw6^+ii)71+6p@(z?shC-kl=J*77+RDW z0|Z&WnHi<~i=$|-;17btX3l4|sXMgAW-`9BSCwOQ0EtFpZ||}kS)v;j2Rp;jO7#AO zYdM+1^KX&;6Bpy3hl}yAaDnsoV3Eic$-YQfb1BN&J=tCQePt%weKTI#dl% zGDD5+eh?^n-5dX`ciTFn!_>#yhn}GcOmq@|89NH%x5>!`j{5ln?XD}@7k>R z<5~?mD57K~ACEZ=9Y!SMKnYAp(*c6yiG_UuB$l}qpDj*7yBsIM*WEL3xHG%%K{MVN zpG#KvuL02t^*?nFHtv7W*FWmZ{y1p=GSk@qKQoQvKb^S6s+)F;0tnmk>tkX;W;ah# zs-!6n$e=>Mfa}OuM~GyJHYr2n&&h$z$I?mStnC`JbG9#MIGN4OYFbW$tZmyNgO90N zF>p)qjE8HWE*RbAj2?KRh-({Ge=@3EG z?6%pX_wWazUc=O$>wuxCn%A3C#BJsZg<^_x!Da>7l2%(n|3xEjFS#CEPRnfyv?jlf zYZxxUy%wI~wp82{Y!r_x&5_$b51YI9QYkBsYBy7x7)2I4f|!kjHRy&=$q~aB7QN>J 
zETfi%49mAWDvX*;ZAb^+;=SkObBeZR<%O-+)nh(bO!2mP0$R^?NT(!|s;Jf$!MIrfrJZHmsyKO&3H6u3sjf;aLc=F-k5 z@(yf~6T%D;ZZCwkfy!`AZGtC3#9`WXm#20>-G4h*Y*a4IgR*E{Kt=L?Mv5X`H3Po- z%_W-Po4XM!nVK+MD)AuJ1>ybMGshtV$*3TiCSmzpW6_VybhXu;`&(075$!SOOBSf+(*ZhIva=49E_7WZa*gJ9f}dUN zNKjAUX8GarrwX%4(!}Xok|^;a59r*kIzZA`M*|NIqU(hm5(Fql1lg4@YVIq_YRZ6G zj2&Sef0&!Cb=Xt}utS1e3Swc%zVVU^b;ZWNd|q&;s(`rXfbD7Zf$`j)9GP~QFF2D5 z*!`dy)aVL*;b>C6+TZ6C$D?zuTt5I@yQ5~3*%QvYaTn!^96AF(B=1ddTUrr+(bf+V z&uK9hN*_f{=4+{%%Z`^pqzLCqPnK#k;fSmW0E1Av4|w@C5uIfbrDS}eVLXJ03fe6M z@yAsjXVh_wX0Bk8hO#wXyH!8HuMGr8@VQ}PNASgniX;~Swd~(L-^-8uLTV}vx{ThK zRl$Wlyv0fTW^Co252SX8;La_?`hBsb29P}LR`KeqH)FgBzk_&h<|%nZ@jLvbIFqHA zs4se#zEZ_WX|F>RufD&yiA|xzh29E^BiV-YLtDJTm!PECuz!fiG4y6f0*ML>1tHkL z&gAk7l=*s_bTOfw$SPCMIZva|_uhjTi#OMl4?|2k`!8&Hy?3b-%ko>joJ#Rt87u`Z z>wpc(&c`%oIkC)uVe`aD@tMFE;?*y{Or3nKHa;r1N>qt>HHOJJdXfw%vWh1uDGoM# zH=u#3#V+5hp^Z21JUm+|v^UQokppIc`wT4^kYvHsP9=a1=i3uPM3iV`JZ-W>^>QfF z6BP$j(FzlE$==u#-oi8BrWc*eJ)+20-_7yy#jb&}YhI9zqV_IGNcnxep7jGboi$1n zibhtqm79_~F!H`~gfD|VT5Cari^z%?Eo5H_&bM6Lr{$ z4^O$I$tJMR#dHt&m3BSu%*IbuyWtBLcl5n{T^0bcz8#W|9=Ec@4J65zTrvYez(0=^ zmQ$Ibne*q_63kDp0;RAZ+&-n$6L2rl;?4Nf&ybED+ud>}>uM1#1HpcT;1ToXov@%h zr2GV z#dS@x^j%z!(AoJE(e%@pAx>}Dx4tk*k0ipZC(*_}p-|ynyPJJ>OBlOr2)%CyXL~Gb zK?!M)T{NLwN%ihkp#<1(rz>Dn(4omoTTUi?wc|to-@03>kRAgsM39~<6*qgpf<*x@ zdm(XAt@U@)Ns!<&*xeuR+w%br2Tf& zmpq-?sI94IEnnGXrb^sj*d1tO6WkfFZmED~<`$d`26X}tF=uQaW|hS2S-Q8FfzY<{ zBDg^GC$*=82eMRP$Is`po*bH-Sh}9qeW$#!>!dW9IjTQsoUnP3JB*m6`2*VnT>{F>e#|31D&-Zxfng|~|?vM0a_4`XueWL=H}k%l%Ngr3KQB&{U-ZCyxgD>`L*z!Rw2UO-x{D%Vd+=Fj4MJ zo@0LoK#`q}1Vp;!#Cj@sBxZL|8-IQa$8~35(fUxui>_i02a1}fdQ6RbwUcqyFWFAjC8K`Z)e%u zJY956c8L#Ibl%+WRy>}4M$U7F18YV&W^|?mlU$*~=^isHX0~ZvhY3~5xAjWl*1W4&8i6#yMzqSR+^Tnr|5r-p_|&zFHxL(2E<4oy>RLX|Em^>NDe0<)t&Qz(#_xS>uh3B2~USc2GA9fwC=N(NKK!E@e z`KAkyL{Y&)pMXnBT@=u!Z=XWy1B;F9;FdxDRZn}=R)0c8?U}K5ET7HHw=G3Y%~7+p z7H?G(H;B28o!64paMzF!!y`Rv-!1fUZHMkoNOc7|A^hX+pH zB7(ZJV*X{Du>q#>u!yle3}pVN7t#SR44G{AmuhqTw;ZZaQW~wdm$#~(I)lJXy z*Q9OC#67}HUw`^e1hGeK;V_bD3Y*0vf=WxF`~4A=2bP#VtPqyJGh0vFiUC8F&0jb# z(}ks*G&KV@VZ!uV?wiK1L)IL&i`~<3wyHemY5T>FC8vjg0xS5mIQ~J%C?F`>o+i7ehqkCGsCol%QD(ckNq<` zgSYuMc9AS3WzR2Dowc#927}&AW*TMqY|Q(T%-sp!D9yjviz7ikwfebG1I41OuUKwz zyFy6llzMF6)LlxWB%Ey!DE+nH^ruF%lHJz%d<(0!4PcnkulZb(eKE>U{V!^HaE~k) z`f7;Yt%6WqHD`+|mUIM1sJ^E?I3e`cm#)|WKA5T={IOvQuiwRGV6%I;u5HSUnA>L1 zyGF}`{u-snezE$)>AOdfx~ErA+ovX=bklx--X$P%_H%tbd5QF4E<-bErka>iVW`G! 
zg-Ry_i2l1c!Aw;lFHLc<39?HokRu1N!69_AVKFojm7EHmZNiNDwQM_uX5Ec5~^B@OXQ8Nz>yWjmisyKSuZTq8=V;(efN+Zh9aIW5==@5MFtqNo-`OP zPKq-@aeIU55loGRCSw!qQf?_$$UwaGL*pS{V9g{etyXe)tN?5NMCmK()KgRIQG&1X zIz32t86YXd&f_P~POM5M8U%PI4X6M__4|uq7^TPJKEf-LZddtOgb*#J3vsRWyk>p~0jl~<&_p8UY?c+=5C%5OvIuD4>pxZ@5AHtZ2@Rsat z9eeGE2J3mWIMlnV$f&_Zo%XU%G^>e#jLSClitYy>eFWJ^-;wyN+_~BMDt37%YQn(5 z#>OSH1GU+D5L@!povLL|)oL_(2%*_8`J8&2KjFw^HJ$jR!i$u>jm}t}F_U%hNk~fn zkl1?{dUparqskmQKR*-+J{*0HC*{u6=pa|y z*GsytSp@Ayl)ni+9DKorm@6`*$8`|n2YyQ#IJnB&lBEY25~J5!%YCi2;oJfH0j zgB$s}>V3LXfD;v`iA-F0OL(MjfEGGb7OfVQ8Fd0#e5(XbYOyvvaymjcv)!cn^u{Th zN2A`xEEpOm!WS6EJ>66d9KoV7fM-yl?{L}1@mWB`#c~mWgU@zmb1L6c2=gBpo~F|) zQk;vWSR3J`495|gK%%>YL;qLDpAOMaXoofq333(9=xAz< zs26g3xUOzxLZC?#gWmCTlJc=~#B5D3%|E^tDMzV9uY}0WEucJ_&=}J1oz;Qdz!C(XRLz*6;BfzBc&gao`)lw(GVy|gwSnkf@GaMW+93;jRwHN9Cf&$>;aH|FG+Bd57tCzWKk9i3tMZG@9TpH@LJn!WfY^_?=Aj&Ro4U& zmSM4~WhrlAMwm(sDt3`h+er!TLJV?a5HuzWFmAqrltDvHl}H;5R+& zO9*=tOo|;fJyXvn?X{|?>?F}5C>@4!J9*0^0t^TP$llI<=^?(3i`C^3A9nH0slcqI z(@fH-=-c(vr<7)@xD3vF#tZRKbhd}Ipe@p*AmnSBt6HOHugZ&{$L^9ehB^2Kj5>?& z$}~Xkm;ZPN ztd3U3S*sMQk$vu}tWZ$7sx~<}5F}6kt9~RrbTh$xqPT`{A zZJN~vd}Ljh{8z=ELoH>Tn&&UiSq3;y4BwX~=%-mKuIV~Opi70M+kV^VM)%IE&VrpJ zxx^09-21J;H9WK`Z9^OC3VmHj2X?Wy2?U0eT}D)}!REn8@dK?i_lY{oXJ8La48G0c zUKWmb%`SIU-nH9rnc=^dpvTt}XEumW+2HH-czu?l6n#6`t2&fUL8`#7B&L)&d{1pg zC787_K|Cq3l89hkOEJkd95!EBsn6u_Lvh`i{S_^5sx0Ze*-l7c6X<_xEz9tmSU&=J zHoSRS%N2Ruy;sVWNQZ34)(4lUh=0;BF1&?}@k1fgW{N>@3u&x35ZC)>#pkC!#I+MN zrK4127PY#SyMTxKkD8<)%!)4%Y}FKqVDXqn7}RwUvpBsX!gpf3YzvM}{wcSG>>VH? z?jsf`-XFX0XnS$j%2rjYr}#=9OEtQl*p8x1NV?%s$EB(TtaCFu&)$;rufmcVz(Y6l;SUaXrx&U8q?HGEtOT+M0(l zRFe7}#eCy;ca@jd;_*VrJA(*u^DY{SdE6@qe@U$K^l_SApftDhMD&K*NvwP3OZY`> zo{eXbm~*m<^lD+zWWobER5$`ovsw#w zf`~5XqA}5IG!1BJ;@zx|MTuHuFDPDLu~^h?B6-3*V%grGbv1kxL)hcGKPYM+vZ76s z(a+Ac@45$>yC+CpSO@gaR8XTE^a=c7kzrCQKtPhl%Ri%S98icu5X^f>+c0xIN~6$#8$6sn z){qVe0Uc8J6Q_F!GID|a(GLdtFIdRJ*&U?--4SCXcSkuHhL=*f1m^@pwopqo1?86K*C;X%L1B}U?kjptK zGdSv1W~Gx_S<)EOUn?I;veb2e{!Hda<=)p)8cYiLy%eW)nR6&lv^EiTGY^E`uHOb4 zX8-&#_k4)3bUBrat1u>GM-VJXC8(l^B94*autc(I^>dn&cSes9JG4!BsfZOav@tO? 
z6EV?D!?w#e|FtAY&bR3Mgtzzd6Ta`O@2Q1o;e92X_UI(;+o#53X^3y}toGf-u>c-0 zxWjR=i7Bn~!SjmAW|ey`xNb6Jh3LO&`N_BC-d*e9^?=a)$CcYW-loJQ;`VJ zml7_D_}yY^d&a8Qby!e1BlUk^oPn#WR-e8{eJ0!TP3wG~4&PgQS)sqh3!*`Mt>(8$ zZ-HQp=3ERo3{uEC-2W2q(|cM?&Dj2hxqKUL5-B~BV6IHYzkc1!>)IylknM*HG2okx z1~j^;-d~UW8Btg-^KA$@r{ZE%?F#3GbRKxyJn9|u@)ki3eHQT-1$?3r?>km~sE0yf zMn`IMi4zZ;GfL3XYr6A%--2W7`3~;>B{^q^*UnGb0f$Mik{@nAM6xx#WyLLwk2~9l z!6N$}TJT^~(QqPI*fLzpgv=ju06DRv@gC755MA5CIqm;Gw&hi6WI)aVeh=$zI^1#6 z6N-;~5NkrAX@WbKYSiaRlwK?BfNvCRxdufbm^aQ*XiQ6PHF+10^?+4Rrr<_c;-dr6 zzzh9lv2V_#^5cLP3*W|UW(c$V$Rmz&O1b)*5p#u;@2v0LjXw#SKXdIr6bW1`X0+ebPOUM7us-dqoqjsYfdW#5;`m>EpQ3^QZjNtT2n5rrsgb|G6?OA-+w3E5@M65@N~RWH3hecrFv@9}#- zzTbad9v(CIea>}V=Q`JYoipd$_w~GPmg_e!4#dXa<^eJVIrzk`@mGRi($W{KMw*dH zSue9s9=Y&jl;swfkyUo7bTa={G@ta0%^~Sf1(VS9a~o&xsaF&{jedIYHdf;4;#um0 zWJ~nbWK{AK0Rv1%YvV}+@Dpw|;X{{=RQ+W0e6Lw~G_&K;@saK42IW1Xm|wSyQbm+~ zey}mfVcL$Y($2kPOFsGbhF^M3!I`aSwTqKqr`8MV^|kSfb^;Cl1us+<@E+!d8vd~J z;&S3vW9;u>d^9Ked${~t>h$s^o(vmq&oCP8|17?sFb zr|Dkg*t4M}Y9^1XmI;q=%PlL58F6UoF>YqNxx&*pbM!c`R@gme(2}4@!NyFp`}TmW zKGXZ>?~DidD!J}sFC=q~9Gy1OcfB}Xbm)O=@mpv(osLfm@7dUi66WCQ`Jg*w{K-sT zJ1-WLZGm&6Z(`6K2-~Pj-CWd`l%0Co|QJ7FebnQH+X-9_Kt|RYiXDP;(0>5g~Igr2K>D za@98nsiD3*py}^^Wm&xQX{DFg9wf^rbyYQ!^Iwy_+iKM4X$-qQVp(@jakML5jE;^6 z$T4)dew10Qtw$r)koHl~sI>sFF>P~lWh2lKPhRjp=+*w=l(e#^TOo(-8FIQl%2)4f z*UAdEG7sQL#pv;hRbc zdcBL46Q?aS3Yc12q1~~yX|spCh6+u#(jDKXJ1X_O5qz8Bd%qBuJh|98kilIO936Z0 zZ0{@TGu}!SfgUxLcAWt)25>|h*Sal+3TP&3dO%ZlJ69@)qY`wf*6~Z6Q38)ii5WfA zviC9@m!+YTOx94`yB_fNh4k0@tMi*N;u7~e3};nd`IwXfZyPdR(}_qr$8)atZX&38 z^fA9QHjw&)0rW8WWd@m8i}J*@bc2`=PODaW&lv?Kz$HzM_N<2X!;edzAnnXQItr#A zDlV5y-CD{Bqjc_Hy7l>eki)AMO{dm%e(aEhSUfC7ee)NVg`~s?$eWIG}#vQ_H+yJM$r7L2|pZ8Kzx&PX>>|#?C#LY*9L~J`B7y z_S#6t4QC_J)poN7%<9&PVF_>*+4G8eZ{lf-3u%$(Lte&O`t*!_K|Tztk17c0mJhn! 
z(Q2Gm3}L-YD{uFd_X^_+9)xM8-qO~hfrFse^!aac#X&VKx-zZLmE0SWisR>%mPFQ) z(kj*%M|8|IsPtrG`0_jJIL^*qlON@VX!e0HPKRd{aG zILs-4xk@5nk=)?$ld z9f#BpFWz5&@M@b1Lc(StqyC%Q=%T`Z27LX9?vfA||9Q{cR7-RBzByP6MT8^aMEIY# z&t=ZLJHZhSC?d!PWrxPfa?VxOa)QuESxyr%Er^zr63QN}?ukbkcxoFWJRJ~{NY3+e zR5I>T?v755fHaW1BL+*5a+l?VBXKq;Dd2dg7|aRUB_TS5Q;Qu@lO ze>4c3$#U8giB3{ru$!Bkkei4Q4sQpBN=gD^gu%kXf&hge!2?T#y9;6oXZD2rkfV$u zAn<4>A{vJU?Z}1O;9Q8ZoSZui{rubOnbW?8umqvqUJ4;_j$ohy3>6Xq|C9_c$^eB@ zx_BJY1%blLDIoAT8#oc9j3yA#7z_#zk`#gnK|p*aXe<)vM&Os(5%MSfAFa!PcU0P^ z;%@(Ur2nP-?^Fja(N7!wqPjcW2~ZvKtLgx?loB2VC*tsiI2=Y!6%ThrX~7);wV{C4 zLQom-UrGKnm&{I;77mHF^-u;P4dsL(P+>ucs326t5CV}B6_FAV5r9CYAdo#F`zZe+ zV^;&zZ)AwTq@WPABm8%#o-;{L^-&V69$cd?>OgR~8U<2DxuP&QV2ERh zpo?fCND!on#yWruh;X6{0kp$_$GbQYK?ZO~CkzUt@TbloBLd(uYA$FbYR{7~=wG$j zEB#f;-A?|sKQcfc5qlo_7YRE9!2sp_Yw4f8-0RTJbmN4>?;7$yY4WR}T}}Sd>VI=Y zYH9s+jNgt)N5^0N0vK3YT2jh5gv-vg&`_3h0W2$pg4)<1gpoFa2oy?OP!u9&BM1{m z!2}VaqT)~yNra>*L}GWW{K~t}fA-{RUR9*H#P#6^GhD1Z|-IQx{z(0RD{>Qc3bKw8-=bs(^FALr; z?%ziCTjOt!|25a&nrJ`4zvkL+jlVtq*Ia*VqWuK_nrpu`{`UA^bN#J}_7nUkb5Z@d z^+aKT{iYkRvHZ>6Rs^yyMr>zCr3$Q)K_X&1TVDO28(Prr7FZvw4eVJ6AQ9ocS{)}8 zRspfIkhTzkz;+o_(L_tIjtdck#_kY`?4;|^2kXM^P{5u91qRlxKPq<%0TRfbtevrlSh}HB6g`x>`c3}4b9k4iHVg02i@@k; zdD64a@+>KLRRl^aA5=aY9PeE^;AW0)0vEE@+Swh=KQHX#$7012Qoj%%I_WOm)y}N4 zIdE(YbR+waBi+Uwj<>5E4el-$+fR~4t*O`hpUJYmEdNH`kaw@g!R)j1z1R6 z-Gw7Gw=dFWb=+qPW46o~>J)jZ_P8|TmQAS2NZ+XLL(|h#mZyuuS5Cc-u$lR)HOD}F|>UV!rH=vE(lnKkDy0RjBDXRR$r0Tiue0#3zcPX&pP||Nj zHxHYLMK9u=@bLFzAY?h2GLWUn>5$X8`B=ten$_2gqbVA?m3M4jjIpbip1Z}3Q+fO- zgE3rNjCp~^Z19s5;%i3Xy#qJ54}`zGPuDb%drr;>g3^PI-1^ixRL1M!bgQJd^#}^& z&4ALDPQysBQK8lN?h>X)d->AoL+G~ua0R~e9K zPFdDAc72NiLgHU6uJ*fO4Tb)B&+2~%DT<3p>>oH7Luee5gQ9hV!OGe@kkm6mw!TWD zL$hF2=LyyFDtRK&fL)o7>U`D8gi20{o?LDor%P{omekQTZ=bFq-*ySz9$&H?DlKUI z1M0i7Iwh$j&?ZnYsBgt zlkJ~5Swc@Pk_`sZ3h`3G83dADF5EXME6Tn)gYcm{jhlGK=-OIxGTh=*50k`VJb;F2 zQKd2BbZmmU9CJQq^Z44E=4m6uXN_)BPqBD$d^$7!XtT^YvIbP|n+V!B0hI5#rLxnb z7*yYhv1OmkWTAa$5Bc2ic*;QN-Gul@O|a&3p-#TVMa|+bB%$<*!UJaXTDlF&WR7Q% zX`>&Ga@`73kh`79Zw?Zt6t(Rh;J!I$s&-pJv-ehOLc$}8o647BM~W|7r6(u6G!1&j zn~QsCb*=O*e{aF#b9UwmOSeKII_X0N!6qY3GiO>Ot(R8cu?pCpJQ~5bf!5cBh|IfM zUB{klr2F{l-WG+lYuBf{)gKkt9+SNpD?7^4*l>mnB*_W7tVZUpekq?EWcDt{U$0-r zivrbbDMq}Ryt*0CHt$7qR4k+ROPsH{TeEmwc)-m4MtI8&JHZnYt+H>Z4OEL`sRMlm+AF4im!s_7+T$0Z>cnp2T_N=IZvqMD>HG$c=DDE zE6Y8TVJ;?4fdoc(@;@=&;JW!+zutE*_CNTw#4mnrXsB?Di-V%7M_^N)DUftM!beFB zGm|=Auw9GciR&!R;A9NYOu?dEh~P3KM}4gPk_KNl{ZoxP+fkPV(9?yKgi)Vdy;;4U zw^t496c?I(p3YlGvihigY~F4+ijqF6oL$B@mwb5P>fAlPV#qh$x`({XIgF3Bn#Y92 zBWOY<0-TbK(%3h7;)Vuii^Y$PfYo2l!`DMd~lcD-!08G-FMdW-6(ZnSsACXb8KvGPSk;hd6IN&!a|;qar=?b zgXcx#GgChqGteZiJ^1{RlX+%!d0dgL2sb(%-#_A$61dQchp<UE^z<1l-EqWu|LrXPd1?u8em zZC`|LD6i1RX$d;c)`_XH(cjToX}4~x)eGvMo9{_Wuf9M+ z2|<4+vpQ+tq!3NNKCj0sSjJjNfDaYohukHyKL;{}H0!Pwzs^4r<4<`>4DY>`ND9pz zdLl92Q%bkOPfmp)_o#an$DpsRJT@rsnoHCN`*kbebI=4BlSHeHBmzRLbr3lK-SJM$hCd75DE0R|-atvNrW2drhS>ScQ^ z-Jc82k{zW$Gror%MKS^Qiw zyqL!&N%3Kv-#fsLiFrw~DJeC*``nb^*j8fXvxMbG7ThQl*L(6~DL&W5C(jh4FW>aO z^K2t2XZccPb>2Hmx9Jl(kA*h#p7hT+SWJt2;_%53{Xl9FF`)Y0s;9PRMmX1lwrAzL zLfFzAv&x$x3g$u!*Tg_KBL`vYk1pR%9dy&a`eo~QSNF5Xy-q)Km5hv(E(Y0%?j3h}vwS%w zJFG;O=ct(o87nS-Zhgy#=JOfo0!z#I@$r)d-xS}fRHng5UXCx`7#F|m=qY=hvZa*L zhns%zprU{T-BaFkD!noj!lwDbkvLAK3!m+UI*iv9X@`sqAqT>p+m$0|vNsgw#TsIx z-fbL3wJYb!^Jw%v%*mf2+-$dwDmrYF`Z8U%U3N-8P5*RCwqeq3K~lx8g#+Oq#=lV5 zGmd_j4=I&(OYs^@CYe|Ya|#^EQG6o-mP9zm9O^Sx&{kEX;V6s|Z2z#4)p#QtJJW)& zJa+ZuX(Q*c*IsWYy%0(RNi*l&jg$`6kn&Naxo8B2(pY|+GO&^MS#fFCE1~f^rjouA z5c;-fF$%2MTyf3vmUL6zafP@_3RB(Yl!K@nA#>9Y!UyBmLL}m)kUmO=r~C498I-Oo zT+RTsQ}{C*i5*sZ6ZM{vHLsZxi7E%v5gs!YF8%?@(KY9q 
zbY9+$cJVIe#AOOf>8x%YZXS(V)Wfqn#wE7%ai#%hJ8cpp9aC*D4P7tZ-j=J{vQo6~ ze@3!-E_KBMNUe}ldWQM8E-(6zmMxO|(%-9xX#&&x2M9&g2IKm(Z1UskGG}n{*@j1p zLpHZ>VGHaC&J{E#v^woeI%@JeYM!z*5^}@FV_$=(-1AbuxcIoRDlpWo*)}BU^mv{w zPNzlm-FLqrftbmCS5tDG#b2I0tk1`;C7usfImLOaF!YHTr$wu!S0_%KcQ`McEf<{snKmt|NM(Ol@*i?9I2matnV9T^LEJbnJphl zkIn5W*E`!>A=5!ACEcO^x>xE=*y|6>(y{%>?dKwD!t&JC0pM#VKZo?aa)`HMD(p#L zJ;xBn0)V09%xl;~&-%2{UTw|}1$(O7ZrR0AJx}nMSR{LOEAERY$Kc6xGEN_pX>pJN zrxh9w(V2Ueg7osq+!RXM67|SWOg?u8Oe7+XMvI%vF7md{#lZ$Uu~&T6st!jmad$L% zvv$hWIM_#aAB{K{(ea%5@dR0mNP@a)oaeT@S;EtMV@v7|UYaDbtA3Hob)cdZsYtiZ z1m(aLquVVp3WN|C6T8#x*^DIeGebk~;>njDWjuD(eS4|CA3>>~`BKsdqiD+ptJvD6 z9d`L%<7?@cDB7lRm;dWEQS1S~fL^ylkbJgwAEzKC;~Wc0$1rQzJF$8a<`a^4jFLQx zPqEbHP5VOQ`9>FB9^;Z1_G)ue8gV;)Oq}YG=itPrm6z912=e!@Zj$mbG6_SFp*D7Eq&Yw+4?_RzR>;`p;q_jUysOZlf+p-+F&^3k*$@3w6}i=(eRUt8 zE8H9ReJy=5K0qo_)@&lsaOXMk+?#S4i=gOFR?V*Q+N82j+ZR3Mo6nt*x3_V?M;!?%jhF1$6QzrLWo`>4zCS}r5 zHKs;mJx&ygUiQq(t1jQ48YkDAr*QYxoR59KRfAq-39yhL+ODcNW`7*ZguG!}qb<40 z$;#Nr$yXIG_EtkY=GKE}%PfW63(riI6=q4cc79+i4it3S5jBs)I6`N2gRR)V zJ<}L1&yMz}*?zU|^S#RVQf+_k-Dmfyz^UW2(LrOrJ#&LS@^S=lgpzEuQvHr8M%|l>Ky&LR|`3HZDn~LR*izEkydw- zB6wu4HfTRdL9KKRKgwpVTvR@I?Y&CcS-Ih)81E5U>8TTj+wiPw)O=)o;7+fmy&FZxdR2KsTxn>W6!~mGM=?UPuFgo z+@84h*8ee1`Qgx)mJ2uXUym}7Up6O^d;j&6`CZw_-UU=2*Io5#`XlWb4PxNp1!;Fq ztIM}!xT!?ncg%ia=-Jt$j#zNN`mM|VqeMCqkYCpFnAnSvA0X7qt8&3Y)`|@L7o=3% z+SQ{~-f&iIb4C?;`BEI3aPCciarT7E(J=8>vErpkO17-1P&KcBWBEBUk<3yH3+}q-fF-c;DQ^9_nc|b(u zNq0FnDKe0Edyd_BamfW~(5aNlv((}mS1c}E3_V9UU1)xjeMt5j z9jus%yi6|waZ&}zH9B&wAZ2XHUOly~z)orHY(2wRjw6b#d9L0-Sc_D(u3zC!4A%3G z++vsZ^EORAUTJC&$H!Hfxs%asnz2{j>OJYa2rlT4qLV(eW-V0ec3k!ha*3Khu7teHOj zJV*Yb=LSPL!26`NZS@f9j*_En@^Q`8cWTdveXC+I7z-7?V(qux75(zr9kGFK-2%&W zaC`o(d>wR$Pl$E;0_)q!Y{!03!M5AP`zk+jzhKQ9)1$%G(bH$$IjzUk2F zWQSP;eA)fxx`ewugA@#dUdcT|=@HM|4_pnNl-4BsMBcChwF=$R%$J68ggD>1AAui} zL%57Yo?2ZR;oDj1zZK;t|LsWFcN6wM>}p~A-tnv+hBSDE?S~wzS`t{9AaB2RQQw=T zpNnsu^t7@HtqScmfif(rvGDdv{`w78{Y8Nab?ITetF0TOWW4f&-owX-%&#o6R1K2B zap+W@^els?GJ+fN1~FXb0`)epuflpmbsOlUuZlVFrdT`2vGj+|^^q5`r1`%p(*DL$ zw!o%$n8D!gtY`Ny?@H&>Bc+CJQ#XaLH+3F-#bHLJdUCe$DNF9{f>KG1`HDy3KCCN+ z&-|b;h32M8{lSDg(LRe_4TW*%+U>L^PEPY7%5{y9F`5cui@fgXIiM2XfT#i4Rile_ zK5IqC{P|hD(b?4f(cDHe)>BNahQ}P2%|aPSUYtJ?)#zta{xUDh;ON@q#pW|*XGrC?r%)I3auSZSSw3pKSaELg9a9Tky`qD9)dHjRDst!i4* zuEus5b;3j|%+8)sKlhk5EgA;*3$mTgGRw)CX1!KfziwXme9Y!!YFJHVM?1Ddpxa!& zRGv4n_R7i=_P*H%-yU)~u{NXQ)G@=YNv4{%-QU_~E^GS;DPGx($~db_rljn9G`%Y| z@f##VXP^o<`RIG6D{Z1nKkZjvFzm)7=tW(ySGj_@d+}*iRa33D(F^PT zZ1A0~AP`p&+pRm^SJ}@b#|E+UTAZhAe9@MiSA}eH4!7esc#Nm68--cM@)2#{+?o2& zFvH|G`?31v{hBtpxjw<_icFS`-p+&K#3$$M z0$V{>PQ6G`#&$aQSWV!0YULs8Z~6_j=9#)ZmX)YCyya3XcV%o4`PD3>tybS5y2C>D7eABqhMwpkGp~CB?ZJ8upwBc~kA? znfe;?mdP_l2;%=$2*gURcgptAsTPuYYAsA?ro1v8-2s*86#oNhsp`3*HXMAJ*0k!! 
zbZsEvnVCq{?hD=?9&@9ktIgGeeRgw%)n2dUQXvv99d;q`!8lS-0`I58p0G}nV72zb z?<9xZ{SQ1MftXEZ)hNt9m*)aYob-J9f^zfvN9qIWn>DD3Nu7+KZJoEdhBYK4FD^HX zVYH<#HhkmMZed6d|02TqsEI3_Ni{41d_lJXZxdv+V}wHzj*^6PJvjo)b*D=Vjc=emN~BxB zlQ^Z9gJEMwPB;~j4=Yuqr`s-7I4_Vs6}lLOz2f*f#S1SS10xMcJs)%X#pmKlA^w+H znpWZ@*X_2>F*!RcRBInZLc3Q(zT|J}Pmx%+ zhF>Y3rnPAGC6RhU3Ub?~O&sGO;ZpH&s9k9EupjmaJ4vDM7;yu54s@fOCGKWY zJh(9yM0;>J@#G_^LfC5Z!S5S+PGM^|+iq;Ij2}OxX-D~u>AJ(P17*8E3D*I}7i5en z2Z#Y25HjOuCt#!!1*B^M*2j)^lb#4Y{L-IRSV8DLHI+Qw`-3Zi7j(&{6wcTm;FUiN zqY?6d^$7q&&-x#>CXWajw?0&LVCfh+oI_YS;4_Dj`VnFOR?s0NZH0oIE{Nf%ijwm& z+T)59?I$uR&IX@NHP575KOuF{(mzj;tX& zF(;O!L*viB&1;vF<^@GP|1y7YP0IT)TR?JsF@=061&y8(HQk}%z%>p!^>cM<;-Fx4 zN&S!#kfk~pe}qZl(M>l#>hQn^Y1Dd8_;d^@!hqB##&g zY$E&R}pQ z$nbU-e-<1-Rfm^kl_V33h=s(MuSSy^Ot}$HA`+=BcrK3>w5lXPsT4a>%5_a+1!Fy1 zxLaV1O7BuV_2@HIx^S7_yO_+FdQnBGMP@}tMRrIvdkU?@F_nuCZntZDikY>E2Qyrqmzr+#jwT7QR&a@{3#4y8A%z08IPX5q#tjb zezxyy5yKHi9lnnyfw#ZjCcWLw&uTnnaM#FKZ$($z=wrUOo`-qxqv6b3Df*A_#;6Oo zA6&_AF%mS8%~+2Ry6EG!Zc=He0GWVRb!~F~3wN$HYO~LQPuJE73J%B|S1J@{V?-c5$1MWl>%b z_Ad6`tfOoMn-W-W&i|v9kLjS|d8^tB+t&_YtkKWJVAhZVhUzX)ejIw>EnMkVY7G1u zym%;+?)8gizBifm6TyX#oUblmN32G4cuw$W@GSGtYN9kv;>QwP6Sl5A!Ziu!2v<64 zi@Qk-+J8iE;S-(|w z*4NkTnYN6cb`y^iCrzXmuYG^IkEc((S6isZtj)}-*1J`EAQEljAYr3Z7WL#j^89eh z*fU}#!F@P$=!GlBDaq+A;ewOY*w?z=hJ?8fV>075BSjxas|1FphRi4W=6FU<3>#0b zytf>XZL99}X{xV7+-vsGDaj}s$G(AsQQ$a}w4PR~9L8yrukJBbK~)0P$!ilE^FB|# z;e(7dUf%3^UFI(>dWQ~gR&A~T?-uP@&{^7+BccDquFE(t+73h+7D$91XJG7N_{#HM zvYcy8Aru=5NtZSh78Ndm5`|jDaz!!7+Y8ytIe8%hR|)mfXA0<2>1gPRa|Vp(EM?iy zbY{mSOH}Q&y6@&VFn04imZj%rYK)L#x)FKwIyQXk*;hn2)DBT)lZ~)770UI=9mlA- zGI@m~n)ydH8~PgKnzWmG8^44lhP#D=qmsv-Itf&O>1F7z93MOW{dg3Y5LYmFDbL8Q zv>2TQqF;L0y4eH$@Xms?j3eIA0~H*czT{19`^pn#+_f!4|=3Oe;)A zOgIrG5gl6%yM?0lqUB;Yq|WE^aqsG*rIDq1(k6m#gxLwtjO9}16dEY=Q;92>)#MKB68&C?ycBrtRn3jLXi|gUdbC;in9U;b2p84b!TNqo**lDy<2ZJ15MG1W zj?@XBsdG@_Q5jWX4>xPhR9DpFm9}Cfc-h#hII2LW9G6~~@qkb}wq?0--muB!V-`N|%46f~k9x9h{yx_8 zDI?G0PLJMwaCbGNJY=@pv+GF@y5}}~6H6OxvqY|>X4Rpo=LQWGjS3BW)O=(F3yn-) zr~SM4@6)>s-%?8)Nz8~;VXORhV_M>#;5EV0$0gklMn09#uNC9cYFVX*d>(9YjUZpS znGYJ3>Ge_dr>@MbBt))7-fWw13osDSc5&-REpZFeAIJ#(J{ZjXD+u&1i~Opbc5mD zKMQTley&dU0?*lfNcJn3Hc=? 
zR%gh2p>ecPHvXJ;$Vu=up8y#fxX?E|~kem75;5~MDCL}z{lf6>T!_Tuk zx~gD45qPAydUa{zw~J88KNq1t;2jWPb@?MD(*Fl|2QL8c;N5|D@cwV`4&MKt@D4E{ zVF26#fr7+Egn&q7Q4z@7ZdaEBP=N0iVGdg8wU?f83a#2-MFzr{rC z#w`6L{wLwu`#Anl*#ACyAin?i;ov~U9=c?Q_*Za+Ea#3rchD6O2}6jal!%y=*v>J4 zuGj^z>`?y+qxdJ(`%x4Bfc}@5a#_yZfOG^LWh;tAq6DF$2$Z0xh!|AR#@5D0Pz+^@ zfJq8VKwxmOAKLvAJN^g1s0b7;DJBdP6t#hf35r5(Bn06|TX8`oQdC?Vih$dqAkba@ zpI`_8ar7tXUC;AmY@fYC4?&<#qmr#!WAy&+d)K>vSMh(V>|X``wZOj?_}2peTHs#` z{A+=~gxT}D?JV%AsD8K4*o~?G<7T(A0v}i%g>eOT9)`BShaHF?tgH{bP(%d_yhH?8 z>;Q+rx(Zn2{3rlo@^>Nx^yTEhsyHkWsNdxm5!(x~H-{_R03g|H~t1`hyI!3a1W0L3EEcmNG)i$S@AkvJk8fdGJ!V0#y= z9USlCh=Bt^lE4ZJi*f+t0X@M0^csl*iAaK-U2sGc0Vo3Ss*>VhJ3QPK1%e90z&0)b zIGYGY!tLyUSEcNIA#E@KSQmqKBA^Lilp_*Oum=M%|2to{7#tuNY>S8QLJPolE@;fI zSPaUR_~#iOZD&seJEE~J1h5kdPqfFm5P-$vo*IC78#n>=^JJHGS3#g=k8}6z&+>hR zyN&I#6Y+2)$`Out0NbJgy}+6T%#MaSDqw>>Q<)*rfDw24V7^xX7z~3V5YS-E9w`n5 zChXOC?tX!w;t;Tk3myj?h=LI=_?=7O0i1{fmjLI0!rH(AJh(Uv{8KOj=j5@c91eix zqJRzq&<9&Za9JqU@$Ze4<_0JbwAJG2p8bti=zt&3KQ8eDH?~|x%9iz5GW)7v4?|!>;97- z(8m!@K)CE+JpdlqPNlO891n2p9NNP%wtFr7QAhwmC56EXyN&@?*lSH;pR+0Kx)NC7 zXV(>W9ZW$5tn{OKl|5#aJ!X}C%ql;*H1=pT_GmQr(P;dnQ6bucwfCCS*(2B4BiGqS zuJZ@MPc9&fFcIy9@c`@WU0kC*R--*uqkXJKKY7gdDh=&%cq|z3e@DPaZ7>8de2*8t zR}J6C3jnHt5e0_-6an8g0~{EhKgE}~ z+)JAAV}jc^QAzBJ0^ED6k}e$YNC4(MB^<`Z5laAxfpq~VM;v){E7o8O{25&N;XFl*DMT>9td6@g^mYFLGKlmk%8DzFlPHJ_4bsn|2E- z_;SFCuwKjyV!7r~;y99)Q5F>+q51H71&jTFuwyfopczLvzx<^4o$umTg6?lTi1E{& z@?faR=}QFX=j#q_gM>`hbvfp4bWS%`^O365X7*CQ+Nx;CXhI2evNMRB8S@{nluDS2 zMGsK22TQ8uG1`Vs+)F7|nLg9Z1|vWJxN&vUi#1V;U-@#NR7!GQM8@{vuaET^*Es}k zV}K0!@6W#GBTN)|}h=_ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +#define RTE_LOGTYPE_LSI RTE_LOGTYPE_USER1 + +#define LSI_MAX_PORTS 32 + +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define NB_MBUF 8192 + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ +#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ +#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */ + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */ +#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ +#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. 
*/ + +#define MAX_PKT_BURST 32 +#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */ + +#define SOCKET0 0 + +/* + * Configurable number of RX/TX ring descriptors + */ +#define RTE_TEST_RX_DESC_DEFAULT 128 +#define RTE_TEST_TX_DESC_DEFAULT 512 +static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; +static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; + +/* ethernet addresses of ports */ +static struct ether_addr lsi_ports_eth_addr[LSI_MAX_PORTS]; + +/* mask of enabled ports */ +static uint32_t lsi_enabled_port_mask = 0; + +static unsigned int lsi_rx_queue_per_lcore = 1; + +/* destination port for L2 forwarding */ +static unsigned lsi_dst_ports[LSI_MAX_PORTS] = {0}; + +#define MAX_PKT_BURST 32 +struct mbuf_table { + unsigned len; + struct rte_mbuf *m_table[MAX_PKT_BURST]; +}; + +#define MAX_RX_QUEUE_PER_LCORE 16 +#define MAX_TX_QUEUE_PER_PORT 16 +struct lcore_queue_conf { + unsigned n_rx_queue; + unsigned rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + unsigned tx_queue_id; + struct mbuf_table tx_mbufs[LSI_MAX_PORTS]; + +} __rte_cache_aligned; +struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; + +static const struct rte_eth_conf port_conf = { + .rxmode = { + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 0, /**< IP checksum offload disabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 0, /**< CRC stripped by hardware */ + }, + .txmode = { + }, + .intr_conf = { + .lsc = 1, /**< lsc interrupt feature enabled */ + }, +}; + +static const struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, + }, +}; + +static const struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +struct rte_mempool * lsi_pktmbuf_pool = NULL; + +/* Per-port statistics struct */ +struct lsi_port_statistics { + uint64_t tx; + uint64_t rx; + uint64_t dropped; +} __rte_cache_aligned; +struct lsi_port_statistics port_statistics[LSI_MAX_PORTS]; + +/* A tsc-based timer responsible for triggering statistics printout */ +#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */ +#define MAX_TIMER_PERIOD 86400 /* 1 day max */ +static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */ + +/* Print out statistics on packets dropped */ +static void +print_stats(void) +{ + struct rte_eth_link link; + uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; + unsigned portid; + + total_packets_dropped = 0; + total_packets_tx = 0; + total_packets_rx = 0; + + const char clr[] = { 27, '[', '2', 'J', '\0' }; + const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' }; + + /* Clear screen and move to top left */ + printf("%s%s", clr, topLeft); + + printf("\nPort statistics ===================================="); + + for (portid = 0; portid < LSI_MAX_PORTS; portid++) { + /* skip ports that are not enabled */ + if ((lsi_enabled_port_mask & (1 << portid)) == 0) + continue; + + memset(&link, 0, sizeof(link)); + rte_eth_link_get_nowait((uint8_t)portid, &link); + printf("\nStatistics for port %u ------------------------------" + "\nLink status: %25s" + "\nLink speed: %26u" + "\nLink duplex: %25s" + "\nPackets sent: %24"PRIu64 + "\nPackets received: %20"PRIu64 + "\nPackets dropped: %21"PRIu64, + portid, + 
(link.link_status ? "Link up" : "Link down"), + (unsigned)link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \ + "full-duplex" : "half-duplex"), + port_statistics[portid].tx, + port_statistics[portid].rx, + port_statistics[portid].dropped); + + total_packets_dropped += port_statistics[portid].dropped; + total_packets_tx += port_statistics[portid].tx; + total_packets_rx += port_statistics[portid].rx; + } + printf("\nAggregate statistics ===============================" + "\nTotal packets sent: %18"PRIu64 + "\nTotal packets received: %14"PRIu64 + "\nTotal packets dropped: %15"PRIu64, + total_packets_tx, + total_packets_rx, + total_packets_dropped); + printf("\n====================================================\n"); +} + +/* Send the packet on an output interface */ +static int +lsi_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port) +{ + struct rte_mbuf **m_table; + unsigned ret; + unsigned queueid; + + queueid = (uint16_t) qconf->tx_queue_id; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n); + port_statistics[port].tx += ret; + if (unlikely(ret < n)) { + port_statistics[port].dropped += (n - ret); + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < n); + } + + return 0; +} + +/* Send the packet on an output interface */ +static int +lsi_send_packet(struct rte_mbuf *m, uint8_t port) +{ + unsigned lcore_id, len; + struct lcore_queue_conf *qconf; + + lcore_id = rte_lcore_id(); + + qconf = &lcore_queue_conf[lcore_id]; + len = qconf->tx_mbufs[port].len; + qconf->tx_mbufs[port].m_table[len] = m; + len++; + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + lsi_send_burst(qconf, MAX_PKT_BURST, port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; + return 0; +} + +static void +lsi_simple_forward(struct rte_mbuf *m, unsigned portid) +{ + struct ether_hdr *eth; + void *tmp; + unsigned dst_port = lsi_dst_ports[portid]; + + eth = rte_pktmbuf_mtod(m, struct ether_hdr *); + + /* 00:09:c0:00:00:xx */ + tmp = ð->d_addr.addr_bytes[0]; + *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24); + + /* src addr */ + ether_addr_copy(&lsi_ports_eth_addr[dst_port], ð->s_addr); + + lsi_send_packet(m, (uint8_t) dst_port); +} + +/* main processing loop */ +static void +lsi_main_loop(void) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mbuf *m; + unsigned lcore_id; + uint64_t prev_tsc = 0; + uint64_t diff_tsc, cur_tsc, timer_tsc; + unsigned i, j, portid, nb_rx; + struct lcore_queue_conf *qconf; + + timer_tsc = 0; + + lcore_id = rte_lcore_id(); + qconf = &lcore_queue_conf[lcore_id]; + + if (qconf->n_rx_queue == 0) { + RTE_LOG(INFO, LSI, "lcore %u has nothing to do\n", lcore_id); + while(1); + } + + RTE_LOG(INFO, LSI, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + RTE_LOG(INFO, LSI, " -- lcoreid=%u portid=%u\n", lcore_id, + portid); + } + + while (1) { + + cur_tsc = rte_rdtsc(); + + /* + * TX burst queue drain + */ + diff_tsc = cur_tsc - prev_tsc; + if (unlikely(diff_tsc > BURST_TX_DRAIN)) { + + /* this could be optimized (use queueid instead of + * portid), but it is not called so often */ + for (portid = 0; portid < LSI_MAX_PORTS; portid++) { + if (qconf->tx_mbufs[portid].len == 0) + continue; + lsi_send_burst(&lcore_queue_conf[lcore_id], + qconf->tx_mbufs[portid].len, + (uint8_t) portid); + qconf->tx_mbufs[portid].len = 0; + } + + /* if timer is enabled */ + if 
(timer_period > 0) { + + /* advance the timer */ + timer_tsc += diff_tsc; + + /* if timer has reached its timeout */ + if (unlikely(timer_tsc >= (uint64_t) timer_period)) { + + /* do this only on master core */ + if (lcore_id == rte_get_master_lcore()) { + print_stats(); + /* reset the timer */ + timer_tsc = 0; + } + } + } + + prev_tsc = cur_tsc; + } + + /* + * Read packet from RX queues + */ + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, + pkts_burst, MAX_PKT_BURST); + + port_statistics[portid].rx += nb_rx; + + for (j = 0; j < nb_rx; j++) { + m = pkts_burst[j]; + rte_prefetch0(rte_pktmbuf_mtod(m, void *)); + lsi_simple_forward(m, portid); + } + } + } +} + +static int +lsi_launch_one_lcore(__attribute__((unused)) void *dummy) +{ + lsi_main_loop(); + return 0; +} + +/* display usage */ +static void +lsi_usage(const char *prgname) +{ + printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " -q NQ: number of queue (=ports) per lcore (default is 1)\n" + " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n", + prgname); +} + +static int +lsi_parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + if (pm == 0) + return -1; + + return pm; +} + +static unsigned int +lsi_parse_nqueue(const char *q_arg) +{ + char *end = NULL; + unsigned long n; + + /* parse hexadecimal string */ + n = strtoul(q_arg, &end, 10); + if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) + return 0; + if (n == 0) + return 0; + if (n >= MAX_RX_QUEUE_PER_LCORE) + return 0; + + return n; +} + +static int +lsi_parse_timer_period(const char *q_arg) +{ + char *end = NULL; + int n; + + /* parse number string */ + n = strtol(q_arg, &end, 10); + if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + if (n >= MAX_TIMER_PERIOD) + return -1; + + return n; +} + +/* Parse the argument given in the command line of the application */ +static int +lsi_parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + char *prgname = argv[0]; + static struct option lgopts[] = { + {NULL, 0, 0, 0} + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:q:T:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* portmask */ + case 'p': + lsi_enabled_port_mask = lsi_parse_portmask(optarg); + if (lsi_enabled_port_mask == 0) { + printf("invalid portmask\n"); + lsi_usage(prgname); + return -1; + } + break; + + /* nqueue */ + case 'q': + lsi_rx_queue_per_lcore = lsi_parse_nqueue(optarg); + if (lsi_rx_queue_per_lcore == 0) { + printf("invalid queue number\n"); + lsi_usage(prgname); + return -1; + } + break; + + /* timer period */ + case 'T': + timer_period = lsi_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND; + if (timer_period < 0) { + printf("invalid timer period\n"); + lsi_usage(prgname); + return -1; + } + break; + + /* long options */ + case 0: + lsi_usage(prgname); + return -1; + + default: + lsi_usage(prgname); + return -1; + } + } + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + return ret; +} + +/** + * It will be called as the callback for specified port after a LSI interrupt + * has been fully handled. 
This callback needs to be implemented carefully as + * it will be called in the interrupt host thread which is different from the + * application main thread. + * + * @param port_id + * Port id. + * @param type + * event type. + * @param param + * Pointer to(address of) the parameters. + * + * @return + * void. + */ +static void +lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param) +{ + struct rte_eth_link link; + + RTE_SET_USED(param); + + printf("\n\nIn registered callback...\n"); + printf("Event type: %s\n", type == RTE_ETH_EVENT_INTR_LSC ? "LSC interrupt" : "unknown event"); + rte_eth_link_get(port_id, &link); + if (link.link_status) { + printf("Port %d Link Up - speed %u Mbps - %s\n\n", + port_id, (unsigned)link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex")); + } else + printf("Port %d Link Down\n\n", port_id); +} + +int +MAIN(int argc, char **argv) +{ + struct lcore_queue_conf *qconf; + struct rte_eth_dev_info dev_info; + struct rte_eth_link link; + int ret; + unsigned int nb_ports, nb_lcores; + unsigned portid, portid_last = 0, queueid = 0; + unsigned lcore_id, rx_lcore_id; + unsigned n_tx_queue, max_tx_queues; + unsigned nb_ports_in_mask = 0; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eal_init failed"); + argc -= ret; + argv += ret; + + /* parse application arguments (after the EAL ones) */ + ret = lsi_parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid arguments"); + + /* create the mbuf pool */ + lsi_pktmbuf_pool = + rte_mempool_create("mbuf_pool", NB_MBUF, + MBUF_SIZE, 32, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + SOCKET0, 0); + if (lsi_pktmbuf_pool == NULL) + rte_panic("Cannot init mbuf pool\n"); + + /* init driver(s) */ +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) + rte_panic("Cannot init igb pmd\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) + rte_panic("Cannot init ixgbe pmd\n"); +#endif + + if (rte_eal_pci_probe() < 0) + rte_panic("Cannot probe PCI\n"); + + nb_ports = rte_eth_dev_count(); + if (nb_ports == 0) + rte_panic("No Ethernet port - bye\n"); + + if (nb_ports > LSI_MAX_PORTS) + nb_ports = LSI_MAX_PORTS; + + nb_lcores = rte_lcore_count(); + + /* + * Each logical core is assigned a dedicated TX queue on each port. + * Compute the maximum number of TX queues that can be used. 
+ */ + max_tx_queues = nb_lcores; + for (portid = 0; portid < nb_ports; portid++) { + /* skip ports that are not enabled */ + if ((lsi_enabled_port_mask & (1 << portid)) == 0) + continue; + + /* save the destination port id */ + if (nb_ports_in_mask % 2) { + lsi_dst_ports[portid] = portid_last; + lsi_dst_ports[portid_last] = portid; + } + else + portid_last = portid; + + nb_ports_in_mask++; + + rte_eth_dev_info_get((uint8_t) portid, &dev_info); + if (max_tx_queues > dev_info.max_tx_queues) + max_tx_queues = dev_info.max_tx_queues; + } + + if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2) + rte_exit(EXIT_FAILURE, "Current enabled port number is %u, " + "but it should be even and at least 2\n", + nb_ports_in_mask); + + rx_lcore_id = 0; + qconf = &lcore_queue_conf[rx_lcore_id]; + qconf->tx_queue_id = 0; + n_tx_queue = 1; + + /* Initialize the port/queue configuration of each logical core */ + for (portid = 0; portid < nb_ports; portid++) { + /* skip ports that are not enabled */ + if ((lsi_enabled_port_mask & (1 << portid)) == 0) + continue; + + /* get the lcore_id for this port */ + while (rte_lcore_is_enabled(rx_lcore_id) == 0 || + lcore_queue_conf[rx_lcore_id].n_rx_queue == + lsi_rx_queue_per_lcore) { + + rx_lcore_id++; + if (rx_lcore_id >= RTE_MAX_LCORE) + rte_exit(EXIT_FAILURE, "Not enough cores\n"); + if (n_tx_queue == max_tx_queues) + rte_exit(EXIT_FAILURE, "Not enough TX queues\n"); + } + if (qconf != &lcore_queue_conf[rx_lcore_id]) { + /* Assigned a new logical core in the loop above. */ + qconf = &lcore_queue_conf[rx_lcore_id]; + qconf->tx_queue_id = n_tx_queue; + n_tx_queue++; + } + qconf->rx_queue_list[qconf->n_rx_queue] = portid; + qconf->n_rx_queue++; + printf("Lcore %u: RX port %u TX queue %u\n", + rx_lcore_id, portid, qconf->tx_queue_id); + } + + /* Initialise each port */ + for (portid = 0; portid < nb_ports; portid++) { + + /* skip ports that are not enabled */ + if ((lsi_enabled_port_mask & (1 << portid)) == 0) { + printf("Skipping disabled port %u\n", portid); + continue; + } + /* init port */ + printf("Initializing port %u... ", portid); + fflush(stdout); + ret = rte_eth_dev_configure((uint8_t) portid, 1, + (uint16_t) n_tx_queue, &port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n", + ret, portid); + + /* register lsi interrupt callback, need to be after + * rte_eth_dev_configure(). if (intr_conf.lsc == 0), no + * lsc interrupt will be present, and below callback to + * be registered will never be called. 
+ */ + rte_eth_dev_callback_register((uint8_t)portid, + RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL); + + rte_eth_macaddr_get((uint8_t) portid, + &lsi_ports_eth_addr[portid]); + + /* init one RX queue */ + fflush(stdout); + ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd, + SOCKET0, &rx_conf, + lsi_pktmbuf_pool); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%u\n", + ret, portid); + + /* init one TX queue logical core on each port */ + for (queueid = 0; queueid < n_tx_queue; queueid++) { + fflush(stdout); + ret = rte_eth_tx_queue_setup((uint8_t) portid, + (uint16_t) queueid, nb_txd, + SOCKET0, &tx_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, " + "port=%u queue=%u\n", + ret, portid, queueid); + } + + /* Start device */ + ret = rte_eth_dev_start((uint8_t) portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%u\n", + ret, portid); + + printf("done: "); + + /* get link status */ + rte_eth_link_get((uint8_t) portid, &link); + if (link.link_status) { + printf(" Link Up - speed %u Mbps - %s\n", + (unsigned) link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex\n")); + } else { + printf(" Link Down\n"); + } + + printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n", + portid, + lsi_ports_eth_addr[portid].addr_bytes[0], + lsi_ports_eth_addr[portid].addr_bytes[1], + lsi_ports_eth_addr[portid].addr_bytes[2], + lsi_ports_eth_addr[portid].addr_bytes[3], + lsi_ports_eth_addr[portid].addr_bytes[4], + lsi_ports_eth_addr[portid].addr_bytes[5]); + + /* initialize port stats */ + memset(&port_statistics, 0, sizeof(port_statistics)); + } + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(lsi_launch_one_lcore, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/examples/link_status_interrupt/main.h b/examples/link_status_interrupt/main.h new file mode 100644 index 0000000000..6027cb5455 --- /dev/null +++ b/examples/link_status_interrupt/main.h @@ -0,0 +1,47 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/load_balancer/482252_LoadBalancer_Sample_App_Guide_Rev1.1.pdf b/examples/load_balancer/482252_LoadBalancer_Sample_App_Guide_Rev1.1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6ce67fc00b94ccb64c130e7f0f4c9a92f03db886 GIT binary patch literal 62413 zcmce-W02(AvOe6Jwr$(CZQHi(p0;hM?*D$+=f?i@M#YM%T2T>| znJb?swSFY>!lE<`w5%{B`v)@%Fpx}4^!W7nc7~QP+}w1c7S_%tj&!2d2F@nJCPsF~ zCV#dZot*JG=;@?QY|Wg_@!44!=w$I380eHe>`mz83@w$MZFqT|oE=RJY+xYGx0_Tq zEU_5iKT&vEW{q8NB;1c`xNNgo(Z&gxF{e~G z#j5tR97@{s2lj~50s1ud=69>o7n!k2V5a&&)G&Wc1Tp_UNvqS%-%bB*UJ4?Hcf7XI zy5w-7B!`=}KhY0pDSFv;d1jI(s`(J7bToCgW#g$=8fSj&L*s~GVzx<(NJ)m2P9;Dx zUK~a!B&!!S)8VQ);;fsE6I&_KC4*L^IZVq^KSEwg6mK=T3pNOAD4t(w?rvWcwSJkgM=#eGrg5MO6yTcM!t8#Z;QqU*>R z+to_eUORkmzs5x3Y?gG)y`7FD`BuL4TqL`KkBYh>i(JhnvcL(Jl|oOzFza@Tl;$x z0S9iaYN1<8dPAeJt*VD5a!_QaYUQBheBp%Bxw41VfGO>ygEypKSY2m{jSn%I{?yj6 z#zY5xIH$PsOB(}d^eAx`A(9hL-q{Fp+>#~niNOdZm??CXZmvJSchKiLtr_T&m}xmL zY7o4ncvKo7sEZ+mqdINL{G1xNtBu=BX&+o4^;6yrW8VUtm0c15i*T~!UE9*{`2y&pYs^t+f0|TmZ7OIC^n2XSxt#aawLR6T z&BY54{Ji<{=z~CW3oaeWT0~4?TmjUmb-eU9h0|fJl`{+Lbo)_*OJ!?scV4Tc*!CP5 z6}$HSJVs~YO>T#!rnA`zY8aH!Go`_mMrDnfM=3O~EJuklc4YxSyK=1q_#F#QRQ(JC z*;n@qjPEtxE;ep3(58GrzWHu=jFI8VFvHN-1Ey}t{LU0Z-I}#82s*%PpQO&;+KKTW z*olGZKZfF9{X=q$|IJQ}4fOJVUJdlh^sH)i^>c31bW>Ccb+gaY(iAbE5Lo0OLuaAN zs6}UB#3iMu8*2uXjKCC%08}dUfQd)&2*BVb(D`}e)C8iS#HhtUFbqH&ctM81{67H! z)-Fio|5kjaeg$x#79hqC>*@{yt%&FTi3j~qB& z;5Sg}=K4GT@C_WD0rZ@#PAr8sRlHe-n-%Z9YiV|I4C0!`5V2fPH>zA@?>wu!;Z>HY z?w8{+(S@%<|Mn?+f*y2su7UWx=p9oF{_)cXp;UL0@U&=B*MZrJq|liI~`$dr0kfI zZzZyKMifD;!GXe4_{|;)!FKkE(>S57hy|EPLdU6KQU<%Te#$mFNtSGv0%vAP% ziYJBvsxBc3muREk?vdpLV2gR7E9UU|w6qL!P=O!E+@+9E#vRnj7K%zrHTCO=F6C#~ zOnMcw7(Fyi#nu<{ltL;8-a*PUrjU3y2w><2i6{Ob9xcnJUMoEoN4yA~FSWgFCTlPUIw!5_XqAdj(tEt! 
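Aside (editor's illustration, not part of the patch): the two link_status_interrupt source files above revolve around one mechanism -- setting .intr_conf.lsc = 1 before rte_eth_dev_configure() and then registering a callback for RTE_ETH_EVENT_INTR_LSC. The sketch below condenses that sequence using only the calls that already appear in MAIN() and lsi_event_callback() (rte_eth_dev_configure, rte_eth_dev_callback_register, rte_eth_link_get). The sketch_* names are hypothetical; it assumes <stdio.h> and the DPDK ethdev header are included and that EAL/PMD initialization has already run exactly as in MAIN().

/* Sketch only -- not part of the patch.  Condensed from MAIN() and
 * lsi_event_callback() above. */
static void
sketch_link_event(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
        struct rte_eth_link link;

        (void)param;
        if (type != RTE_ETH_EVENT_INTR_LSC)
                return;
        /* Runs in the interrupt host thread, not on a worker lcore,
         * so keep the work done here short. */
        rte_eth_link_get(port_id, &link);
        printf("port %d link %s\n", port_id,
               link.link_status ? "up" : "down");
}

static int
sketch_enable_lsc(uint8_t port_id, const struct rte_eth_conf *conf)
{
        int ret;

        /* conf->intr_conf.lsc must be 1; with lsc == 0 the callback
         * registered below would never fire (see the comment in MAIN()). */
        ret = rte_eth_dev_configure(port_id, 1, 1, conf);
        if (ret < 0)
                return ret;

        /* Registration must come after rte_eth_dev_configure(). */
        rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
                        sketch_link_event, NULL);

        /* RX/TX queue setup and rte_eth_dev_start() then proceed
         * exactly as in MAIN() above. */
        return 0;
}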
zU=-ws836{zsse6qS-Mi~goLr3)NPFr1ZtjjMq1I~9M4#AgNN$l?oP@#W*45r^ECG5S3bR(76q}kJ|sNcVDT%B?7ToPGO`Qpcvi*uS&7lG5I zm(!xI2aSnerjMyh9ecMM>fH-v z&B_z}CdeIg91af8hosvw6y_~d1HLymzajk^KmG(0VgXXpAg&BCQ8qi6cdg=H}k%&cfic7@_ue<#)W4VS^%>(l3nMW1&M_p{*( zA5QF}#pjhgk-|FzmZt!&{2kQrogN?1T50qF;{(P~XhyLh^^Q=Ze5vkNOq;ISu;@hW7L_ z8FU3>?uQot`U@$c^-(~prHgGkNUog*Y_u_9g+nqAl7|4Qi}`3#`_BkShU=|%oG6-1 zH;m|lm9?c+s4EaU7z<*j>WnX4;RQ#8!Wuna(OzkJ$q%~#y`$q-_Ac(LQg8R@ zyU&MDX)fQsVgY~wNH%02{?>(9|A7nr58E>QiMW4?iT}p7j7$pj^8bpjGh=j+wBpjT zKO7>akfUIteliNUy1vz*kX-eXgp=T7JnIYPMKOqjjMnzbdtc?iTIaU_8nwn7m$)uEi;@1e^8 zPxz-gpr|AH1y{~@P&wVbUB$_bXf-^PAzIKGKT%J}GJU0+!t2Dw!p@#6!og8?3)T6A zASM~pR!3YMpsd-96h`Q}c0J`S<(MOY_}0*+{}t5eRg`?j_37PS@TnB&8NIFXD6V>Z zxk0hTGM74CV1@hJ+Pc=c$u||U-L8)MDnyltI11^st~Vg>T4P%L6Pp1yD5nK0yWZrL zt`9)m$-3ij&CLD}%>0kthE4hZFSm(gCd&L-C1l0?YX2n5|Bts?KIUwS%9_)#6EB9)-r(sm$X@ z;yrCr)lzf>yHU~doqI@HsQN^4`vnn!VwhnLkXSPwLTKNzAR!>w zDto228=;m_X#1nfz0B;>mc*9ZlKUbgxkVNx@2y>JBBW&Q_){X&Q}3-u%q^2c?9AQB ztzL?#A72~6`vGVQaj)JjQ6B*BT?Osm9Rc${>1+Himtyzf}*ddls$;Ypijp?{9~u(&YY>0W01lNq?); zKdJT@SpKJ3!^X(`msafm&9RzPbnO15vyXDO2XFl6UYEo%%rcl*z`__R7y&C4P$UKC zCCpd{a<@oeBQ=>vm0P;gy& zS99~?W^ZlRN?yd$-Wlq6hDgea9i+20G0DnSJWrl*Yo&_WS)`R%STE{(L9=%n0XPqv zJgIwDOs7poSRZQJPF8kln~KT#4SCfg7LWc?{6J95io~eGXo65RU7h%_B*p#UYTcLJ z!D@`7pS;2O#Np~^eh(LYxF>@A&}|jU zA=VfhKt_ep{2<6&8ZBw>L(v-n5Ac%F`x;^ssNfxXc_!-5Q;|prF z_~llc1nUGs?c^5Fc|&*waI1LMMeZtIcQTKI9*Z>Tf)tcVClg&IGc7JeH(?VK50;qk z6_Da)lEBX;5kB2L6NFXfySk08!qfo)5dc^}8VOf~L7xZ8_ER0L;NLLV2wB z3Bbj?&fB)9!wG|#&%WP1VIpYq4$>AwJXZ&}vzOoeYLu)fo{j)KmIZR3VrvA}Sa;d2 zl6nO@eXnGF0Lsi*8GnnKe^UFi{7>!A$jb5uH2)PdRf+wQzv$sZ^h~z#)16Hd`Wo`Q zP>pX}F7nOc>89pYXIt4WLEs`1gk<@kFP}d@;L$*+@$>M0>eqqesuSk@fPl3J!22PO zp8{p;CT7o`rj)bHTs<)qEDvC2Phcskg=E!{AtX~VrlmV&G2Qz9;jre5NlK+*NxgG$yGjbs z^6nh5NGR@#zls1;SBQ-yJIO_1SwMrL3RxABDmZyS(xA9bQJA6t*@3TR*H&xvW(Rd= zU{+3CM(~%fs4bHspOHD!o2{r_X4Lc()7NKM&z~mCd>9}8-x~U#bSc<5{vYYG|Et<& zpr`+D1T~o^Wk<~dAN)N|&6KH~dU+=#XdS>G2Ch>NRHKgHzc1Mb`tt2vT548unRyi^ zE-tK~=rhjyXy4vpN?oMHt}$Ag^mRGPp}=f8r}H^*{0CUF?4=%2?iv+nc6rG)WqsUZCM zM8$D`JKZKyovT6yNR@%hL~e`~TF8ju1F8Z8`9&FfD1R!aA(0uO92K=u{t+tPYK9UZ zX0s78Zy>93xi1P2oW4^s$$aZjJ#dXHo;6J>3BW1d2$AS7MSN(vsLw2E_M z!WSqWXcXB6Q9e&=op!umcgJ8lefDp*?f$9XBHw;*A5N;=ejlF)dVap{FZ0uIdP8DR z#zepF<*s`^U!V2p+?!0k2U8K|=nMs^(HMx9BTvF?5PBKQUMS|DXp{{(#Ax?WC1)LQ zTIH|!9e!$8&Vvi!-J@1O&=uMd&~nIQUB@KnIVWBPdUdX%L5*pnW5&}oGau*wH?8d1quTw2WI}3U`GVx z);Kdqy?Jm2aT2?HRdMHAg!u|a{zxR3An-Ei?bZ?%pP*4o7^0vdOCsSOOKbw!PT1{M z*(jHro^$q!*f{DGgn=bZJ{xT|A09sM=&I6$@WP!KQahvyGIvJjWIrh`BNj zqm2~!%QlN8Ows-QxJ~C4s~;u=!Sa`y{Io@Lz!#c)f}ZE=OQ6EKPcQ{sMl1Uo2+P`5 zW+HXXDbp)Y?w1gnDWGxBkfR z*(_|(c~&Nwgp~W%O2U)6W>ctIb)>qCo{4gKE$`5Xrc_L$?X8@FMK#S3)_09F_z>#FukjPVy!rGKF5=rA=F^<9( za|VzVRM{XcKu>=fM4zm?CWJ4L(a)g^rT0;%o`Ilp8ki%Mg%dp>OiCDOr15u8O_9zr z7&`zxQr10KFm>1F_49Q3UC5noi-8qSO>@Dln;!dUaB%3=**4yHtxUVR@bXxq=W_^p zJZ_;u%d2P5Urr%~9J*+B8)>(=Sa%zGzOQojZZoWzBaveH4Aj-fG`K-yol|=aLSr$= zqO%%i&_}QxK^$<3ExWfEvA%KvQc_=M&+h%+zy3GHTS%Kh2qIQc=~XE z9bE7DdI?1OpLJ^T+WHpMu+f~Zs&Nv`XpTiDkdA-cHpqTmojU75>G;?YwchC)aTuBe zt*adYC(b>)l`Y4OboXYo2|X1Oj;aNO|}Lg zdwx4nJ$;P_V7-6K;m`#xDmd4k=uOD_MDumE;CHv=eT;xLt<5;=ruDi4$saF;_5!;| zI?!=;lRj{6gxjoLzCW|JrmD1cm2$2YZ!Den)LiJ*7=m>Ke(hCXDd&K~`W)1$waYGh zuBy<<*r>hFpIdQX17}~Jyf6#Xt?i`wh&>NL&B5x0DdmwY5;d3hIC&A}hL7jWr_yP< zB~7r6c+n`49KCmVRE>&_B*2A6}*1Q(h8+L;7r?035;is8H} z0_Mgx=0*TZ4+L(9u@@(jlQnQ0aw>a_;Z(S@2ovRb1v%r+yU$W+b%_OEqBoBs2)4uWhfR4y$Wu28{ z40d~;*pmLwprdVL!*=rY-@|>D;YKJo2=Qik4<8>7hf8(7dbvG5 z-++N&9MtcR@0XXy6B9U)8BCaPiv&Eor#z4@-2?6;8JXqpf}P|3L*M-UIW#nK<~fJ+ z4?%ak&F2t9{xZ_O*NJ&xq&-?5mdT%TF?Tneh}7?e 
z7pSMYooiOjLo)j*(3$tDUsk3=bwO(Bmsf5SzWvo@3^Zw5{~Krjzw`NjH>X*dSs4EJ z#e_+%3CDxcIXC-2ogz9*|!LRVONw#T@ z=WP%Pm$jV3ph6nrIFFrc_rrY-lcHw{!qDYn>|vkP&)_B5?aX#LMk2Q-zv#% zm&PK9hzU1pXq~WMXu!S*C@y>BZDAE%u8C@|H=eEtW(pa|SA@fOZt;r>3Bbnb$5(_y zozP7~3OH)Ej;`^f3YyBaplcni<3_1P>;YR62!{@PH>H#&t$qq3C|}fv?m5KVacC5M z#|W$e_=3IS3uRZvH8Gqbv~Bz3*Na7!3j`i0CnGI{ltySO?uihqV;U%jE>k9@Ol)P;-_0&?0yj_ts@_JD-`p!iG(lfN$xsfUJ{$v#>&yLljV>nOsA-fc#i>vVG>v3S zN)jyC=^zb-%x_Xq>9)| z#~wLpRJgC;QE;dlf@K;Wl1gMKAZJvICVOZm1U zQt@2%?3gT&&K6gFzz4@YrsQ@zgWlGA9SH|qOa_zl{}4xT$Qm?7$kyi)aV_!~X9CjL zg)7rnXBkSJU2$gMn9!F>);p@e&cHQ9-E5KvNMs6QLG^HBk8AikbLau0+)Fepjbf%>c*p_r528ZjOKb)P?CrX3zX zk_1)60E{Ud3-S&9RE)2hK)tLYX8$3NB4{*&9bN@~l@_4uX@+4kWb>UQs$vqvMG*LH zic|7@5D_6YX$YUyKLZ}8Qacy_RuG6(Z9k)f8=z=LUJg(1Ly1XvQ7Ro8K{xS%9iZP& z%HfJUkcHp4+3)CEswx@95$RH>C9sMTwfvs0X#hPB%C^R)krxYH2i$vY+X4x2Pu_;O zq)br@kOm^yE%|bvA3+UVPF4rhQEP)j^fc>&w7dL zzwfAUm?PozBJOUqQN@ni5&LO3M8(8JG_&o-gA~De|OlOcFmvaD*eo-~z zhhrI!uaBvHm5~`qg){ZV*%FGj64nMe%c)XPGN}bR@mv; zEA$@v8#cpt?Dq5*5qgy?fuJWnR61*b$xCh_AAZYWGJjksyjLMg&P6nEnYD{hzE5g=CBVFNOC8PjO27(KR$^mAL* zi6LU)3h?31Ax{!`jVKptH3)MI4EoI4l)}Ocn`U@1Ob$8AG_$z@NGN%GClF?Seh9RI^|=4iUXG1X8CU zXMvmg*|L0ef~P*_IKiv7+MpXZgb1iMwcZnf7Ga2tAzQ(U&iCYo9(CEPPsKKRU7lGo zVWVL7#`+KM-)_X|z2(eM*UJT2sxtK+eY;N+TZ1xelM04Eh&18u4dZ$Yov5mwJiQa{Rk4pG67ATl-wy zrFTFJsV9m248=aJ3M79<@K@{-2vdN|qk%}r`{#^oo0shoItq4oc00M+ z@>?tm)cSE<Xd_sft8)@dZ{cLFgMo);(%~#YTJ>-yq?|^229lebwdrko&`-1H^Y;=Urixw>oIJTEk+?XkhU>%0O=G-N>Ky3=Us5DA2MXi3Wm$)`B1jqS3c8i za0J(qBzm zK*vOTD!~G}+7-2i$jgow1lSi!j)h}62(+dGp2>g5m&PM!1cfxY(n^!h4;XGKw~mGP z5Jh;lsE?9_wx{W^n(2wq^sb>7y387mVYgMPf961hS4!5$3sU-}10zr;*Wo&PF`uzl z+6neth72u&=i~NVyu=*Ht!$&J#}EXTfX@$qQHab()RQmL-ftr}$E^!1JqmePDJleZ z>33u#FywTg|FHF%goKU>#-Qjd`V!cEwN2UJ57N>zj99$BhI$B#Nd4l<%r4?fm_=8@ z30U&e4nx&Y*d;mB3{qp^@hGuI-$y@BmYHU`pZ|y9yz&({n1LxrRRWH*@w1zd z(Vq5TB^`rscD11oiL=moDarGIq+p%!7_Bxj9DZs5Lv1C>&~aS3fj!LJ!0_m*$7t5x z6*S1=!w@dmjLPL8=vpat}5<760pqpO(UZ3QG9z(?xobfgV* zp4}p%;=ctaIv_V~S|giL(WpGCTXw9N=V)D-bJ*3T<-;d@TS)iC9r|EJ4sg7Xf$!jk z_PE^@V@WV8Felu`YWo;P0}HtT)`@QEKMV>_WNh|C#&WvAJ-c#aZSXS4tDZ&gr8U%N z9bq?lwy^|7m{jVb1-7nWrfGvzXSNoD z8snyNhy_jr{#=h77;YFaG(j(k99}sF@@`<-y*AL1SR=cnBTmPT2yCf5Q@6pt*~Q3A zSH*=f-!(^{PI0EZ7Bja#0tCXzk66#}yhu0Lf1Tz>nJyW2arOl{J(cYwwZ*=pLophJ5 zq@+h*0MzNtyh`+a8Yz}H#$qxJT)87h6{koAFKPzM&8 zz@~;bUvyMevp>1IH%J;-^ygl(kmna57hG9WT?Gj|5unbF&-Js6bLoxtj?{sqaVYq1 z&BfASv_m<16dhd2yGrHARbO4iZjjhYbKui)TO5}$9ACAh6jtfh%Mb`3RXe^Mf@v7&?4d(vovS#r7ERkrRF=|PG;D}?B#6B<3`AkC-8G# zaH!1%Y;>3w+{=CQQ-Kc`I%8RVQfY)JHLi4d7(8gxw@%^JjT`m2I(N7)^SLw3o<4is z<#2ANS5hBC!yN+UBs*{&?+?`Ct>PqLM6a4bMg+g4q?2&@n`xo%)MLgRZa$Ro>!lt2jx67Sg^3Up=mWIyvHNIvS){~hc^hJYy zpPg|{n*8e~RbC5JSxkREbB?klE2zolp<9!k)U6DWnoWsEs2^W-MEuXxz|asIHhVL< zfc#Bvv5MiAoa6;v?2=#-SdlgjoXTKc(5LO)RK}?878Sabx2Ll3OjhbcF&!NG0rcP0 z)T<7MN@5khSsMFg%ntfHt(9LV72op6pfETn;#a56OO1yw~*n>P7H2oahus_ho zS1lf%E%g*O-=W9R;&5@kLFvqCwnsvVSAqB{9AbU zMAZ-E5E>Eoz#ViPzWDPPm|9zOMZ*ew z%@PuCn=EkhX}5|?u|sM2P^0vpt8>P?^}dPlPfuQv(j6qPFWp;Pu{^Kaus!({0H+OY z9+^}a*0jvM6Moqrkt1}=tJ_f z3K^5XR-MAwCp{8mvt?oa;c>saVe*FUGIbO6`W0$owcXK0136a^biyv(in|3Ban)gh zh3#DW-I4n4)|A-^0EX`qBbj;8cqm0aU&Y4Q6QMER==Fw9Ar_gZWxl;yT-C#@kwtu_ zp1N3JVnzt|Raq^eF_LawI<0SWjbP%X>G)D!QK<9 zcUX4B1wt%xIOiEzHk#~G%Vl@U7TGmVf}eyp%J&bGQ*)J5&t&uSFh|6KoeYe8VNtDo z{t}U~qzps+GUQ?jUm&wjBD&G3QhoMyp=pdfC`k)ORyf7yhj82_+-K50qLZIGModav zf7GqK+?Tj^M4br6e81Z2?zYG5Ycc6W^}Oh-<-BWBDI6$fSMD_R5BO@*u>s@TMj$<3 z^0rXrUbEp_{WfIL=(X+oye4*{n{-3{TAlt9R67v8^6yqFbEBE;%o)l)ntDr?+&Guw zp}&Vv`;h64Qk!}5q`G$FBH6Qh!qkAHR}|X5e2X7 zmwl%%o|C;WG0<0iawHoiX@PV~rHm#%Q@%{9MG-YTwT`ZniV+g(QDc3yE4fSx3NOv 
zQ4+DJAn&5AP(=}`dzF_GCr_xQ40~sn!cF>Fup`JFZ2V`n9pw zrR^K*V%?5~*Wg>eU(MlfjZwPL+Zi4V#nrqsT{N>iMs$uYmlVeX!%>xWQ6;{*W`!Ue z&Tr=d0^^mOirWK*Sex{w46J_3;$SpeCa;p{;>?s!@ZAxN>5(Rit|Krmkdycwq09*~V7v`F3pU8_!gb6+>Ba z2wi&{#{1np5A)|zf6ttnl$PAEREY}*^Mb+Fyp4-)PI%k7{<+z_6GO6agp~NgQ)hE4 zFZrzw*mOtQSuM2lyKCt))0ChA-{oa6qU@oUSA|xZ9Kq~bPrYc@(3vqa?8ij?CB36f zu?n}F^h1j0+nUW*gpwT6+dWeivKyy~@TD zl+hpLH)>*h7*-|oDl_J;I27I94tYdVLg+Hx80s$bT6R^-QN@Uf4lzah&9a9_tb{r1 znt=pZob8%*U7t^ePCrB=Qg3VX(X7WbC#pY{b!)LqpLSK>)tu(YfY-}}U!4F4a5kpb z=3=R2^`^{9&zFATme|XVlx-)pe3nXQbvBcYx%Qz|q2tR~o-0C zQ=F$+dcN3)!s1iKc^}I3i#K~K=%BAe;2!aj*^cpml%{Q7G@0-#zPbwzaM%h5PTIFgPM|RX3EMN;5k}w6^7Xvp_?Ys&V zFq|vUVLO(F^#KZd(OO%K$2_lBtg9Mb(Kww;+)Uy%m zD3~2fUUeEzgtkA5ol*69Op?8B0VDkO20P+)~| zL#|JOk!5GTe768RG3CkH zC}ejzxY+LH*(!W$ss&+!y1|;sYMyfh*qUNvljfsk{+6DN*Q)eI+I@HU6smQ_Y;OZ9 zZp>YhEL?F38?HTeE#-ZYlTqs~CkvFzxXX~?MeSr^y4hid_nWW8pCZk5JSdMI^vfdW zRmKbn=To@pCN)acumKpD&>}TOj48eT;rq=8Q+1y7-s8yEeg1Torv#)Em2|_Sp6DP3RYoK4FEaP!(C8+b9~;`{WMq;(mw; z*Uq8a%u=E3J<+Gk75(pC*};m2@QiT4`7h$ni;jGq<`Il=%qA*lRDo7D#=-R$jn&%A z!%{4f8$YUCKRZ5}r87ZKhEW3+-$#nWHc(NCgB)fOr!I7Jvf4AuS3l#t-cJIB z*uf@{v^0mEW_UgBi8B#K}vobyn5kz^~CLz0|u{0DazQ z!@SeQw?wShh7>&469x>%X@9}~k2%;(R! zwQopzfWs$ZgTEQP{NK+9{Cn`i$i~9wI?I#$}*MLsa<25sfFsd zA_e2+iiiGtjw{k_#4?V>zf13;IryKULp!k~Qj{qsNIEY7SGNE#!T1Z!R3p!i=Gf?( z+IX3#u}-7GIV4SjhW!{uvwr!mX|wb)YgYZDCleh(dUF_-^MlV64Nr?!Rt1qUTDt)U z#pU4usb>d|vcRg2V+BPEhUO(9f{ld1Fc=Nf3qpnbw%kij0W4GiOZP^N#KLW}@VqQy z5qQ9ao6Dq*4jnz|KB4h-s*{pd=W|aqhGj-mAhXW;>jt>xf;!27)AhJ&kw5CGgve=?@b?)s6%n>s z9jdRw8l;hN0H-*8jL%hKi3H{iATyr3RI(c4MTfs{ppO-G@EEI63cX|nNVZw020o-; z)-YHfqekRO0`pBj1uwNBL4Y=#G2Aa8=|fdiGH@#zXc`z>F@VU`)?3tkp6@N({G>`JvIqg1}6MNbBXGj^`5717a!i zh^#Pss^x5qVrnb$H9_+bfB#KkPAaKw0R1{uP{~iODxsVd_t96{Q0&Ji=Ep3T`DA9p z7)S7$kq!_|lh6-2;Px^gD7G-NSZ65#Tcu+VD$gn+be!O5jw%8XR@O+}#kO z`O$v%>C16J2uOZGPBE!@XUvlyO#>kV#;0*0v$4oD%#ztODTr>4DB9-cW`G*53+9kH z;|7fw#42PK@u1+qLMh_a7`Vnr%c86U{HT=vLv?1rX&QkL`Lxd391$nw^PTBuj4F=G z492Z3WOednOyvTa`5ljOxnP<^j-!CUP=Iswu0jmXv8k-N>Pb3`vV*$7Me>A+bjVq_ zZI*~5ry8q?{E}D!XIF6FOo-L{h>>s9(b=j5MJ~TVQegoBYY+WPzMc9_V8M`s!R10V zBpMG+A@mb&lk>;gWoeyJl+aUV7M9#Gw6GeXIzl1dajSye@ z@Z8 z7--+nPdM@d>bL;=V)95s{%C88%bz40j?$vcB2lhO`w-^53 z$%YA7RPhvusb&sRRGte-TV$`}5z;^xBhbZ=!+HprD4jX+NqT0v}a1D7gxKxUb?z|+xS9$uPY{o^1(|)3VF_d_DWa%Q&6W_As z+-)0M=^-}hDX(A@IRMtpD5>!0_9R6NHOCJnq4agIpQpghJB4r3jgiZ+Q_?7YxNf7H zC&(^Dc2FFfXv8ssEy1Kug-`kCr`0Hz0FcnA1&|SkO<)UTpg4jPqK!0=1EP)zVyD?OTx~;`FLNv?Sx_A_291Q16EKalNS(_^U!=@+&H>H#e<$f zpy%-cL}S^OG&uLa*rBzuN#X7|w@o#P)l}@iy84a{eVFzLouEI+K^IW8Z2EP%+t*SZD|tz5?!AN5oFtj8CL?L;2?T@s4^Vk$BbhA1s-6GaMIzyogiYM^s) zXb>hZQ?b7lf*_%S>Q>y!r-m0{ovau8KXX%No)wDy#twT&Vw z#HD|VKA~JU8B`4060nO7@_uU0!OeGf`&wg0f54IUoZm#0#J9IHYy137eRIJq^x@Ipo^Ct{r(W zBh?@L&Y-grl1ZvVZ=q;;4CgW`u2*q-EM~_u*NdIp`&HHzdbw)=#FnCz2sd#nSxePZ z^CXAS{)o*{wy^<|K;M)JxH-ZlJA1DNibU;z$ATC88;ySlki7GiS`8|#R+G>F=6t&4 zd%gNN^K}qSNd)5?f^(aW=c@D^VJw6I#czc@XhV5LqEAD5=5U=yV+4S)MZq6}~XUHj= z8exxVMzP@V`1EM4vRd{lMD4Q(-1{2B!a=3Rh`tBy0}H`&`$S zu>nU|fV=chy$f4e|B%rmx4`y|djb?T^3*TzC;fR8_7xlb8TmQPldcim11(}xg#+DC z`LkktV=Pf4PxXCjW#=nz$#>gWWM48_O(X%c__lR4u|ip?(__3Occ%vG z>njTB(;)Oq#10-Y0>}3*E=?e7A#t} zW!tvxUAAr8-eud|W!tuG+qP}1>)h^+*DvD6{S7NWtQ>R9ne#=KY+6=G87Y!NHX2%{ znwXKa>NfWz3vK;!8n@@SxzduKkDWZsA#BENLw6bh!SZ3UJDM5FWlAdX$CwlI63MEC zYU%MFq>m%@+?0)ylHB%Q)(w;imF4E?$HD#e{p#yUedu6F4(72K)|PBOnk2y!xiQtd zZPEgCm45LEfyB66#cSL)G-&Hlk2Sm71H=7F%akdE=A>{-#qw}$nl;%?5mB|3K5hUB zS-}i6q0ZA}Sct?MPJPU&VUDbVMo{e2`t+ais;Za?i3p*{QARmtMa3dmdqT7InKJhe zEPahT{N4UrSXz1YRkf07ED}iT)TweSP9M9D&P 
zk>JJ^U_iSV|QI9lORfMA586+!Il9LyE)K4U?)PRuP5Su4fa<7iGxF7Ag=_5kWiSiU0YbCDW`r`19Jy<~Rq0i{!;eUuu4$8BG~^ZRxlF}CZClc1m07`kNf8>+F=gB z6!QY5%LfTT3GgAQjQn;Yidncipy)u{n1u5K#8~`CX7sIt3GI~(<3RlU31yI~IJi;O zsX%KbAOjnEfzfK1RsG0$$lS00a2~^QyeppCWUJcI2&#~-XA^@)REmSCgAO(i?Wtf` z^@dc!EBTe%(X1o*=}sbRWtHe_&r1tw1Ty&JPDqR1x=ja3Fw4rMWy2R{=CN<_zKUQ3 zJkg*E%~sT46_X;xlK zF|%C7G-<*kGiez=Nh1Q9F&2{u%)X>>%wgmnW2DR_@3$Jn$jS;(_HYn_b1}l6C@%tb zCtsjyh9jBA9?+U&Y^-m}o*o1g`3%-Q%JC?yBoU|hHkj%N<7mWvYn*sD$3%7!*?7yS zQ!tG|u#PGN$0X1b*0t*?b1HzFVmFZ*sMr1!Jw0P&bU8$ou=bEa#1~U^16d(iPc7FJ z#MS%c1o3yJ| zt^*2Ch*xTUH8##dy9C?>2tSj7U$7zs^IVo+6y2gfT^jTAuotu2T6vh@7F)zH>F=Xh zj_id57xqo=^EwheNmKL;K*%l?%-{Ouk1M@7rMUF9f#HO|)nj1K&=&@|f%7Rx6H*}% zXLxHTM-C94LPgIQp#T!CaLZd^A1*6%B)`&oqNmtezi4()ZO~PHYSLK^kh{{PYx&e% z6N=%?WVz&xT~HCh+w3iUrN%P6s3pswlU#7j~C}dwTlz)qdHvSFyG^B*k7)Yjp>P`0=HBgj%*7Xiy+O; z?y&{)#LM&Cg|@>leVCi8!XOaKd_W^cZKUiX&jXDT`LBIsn4*oa{wfU+GbfQwATkFh z{+<)kv>k4Or05F$<7#?@M{Jc;O&Ob&6QX+K9E7z%_XpWF5%(NBx8Rq-+4(mZbC6YF zV{IEpMx!8H<=}}=jFJLx!HA3Hp6XM|!k!M-7n+5kwCQNNUFC|7R=d3e=rLkuX3D>H zB!Z)rn>5|W2u&8EyX8a<&aqQg(a|=8wBfBXjX-y{vR~)9C%L9epRJuj@3{8id2P2P zlzcigpqGrH9Fym8f+!tSh}9@;ufXNX0LK)~I*jsOuybStTz`GhtdD7^6&Z`aChIG0 zLa8DhQScv0DgqyGxhZJ#mRnWE@+>6P>X73XlT&?4KG(Msuwy$8K%{%NJ@H^$Sgl3=;T zVJn6|&#Fbf0XQ^D=HPkD!T~$(XjUxW)yC2E%{|zw(HKCx88|R^?VO{OZG}IfnC|%q z56kslH{|sv<+=Nh`x(4+`*D4S5s6(!*>~;OP4Q|oOlP^Tfk~AN<*LyiQBk% z&xx%&XU*rt#b4or$N3dL(Gh!1amf3>@`tkJ0oFb~=YF`HKsW5;EXhT>Mm5wIUBows z0z-)32`!f*_taOWB6!_MjiAj?M|()cBNo={?#14GA~=$bQ|SlF!VBegMEmT}O!$~a z6jol11FFbP3q{uxt&84263sC;@7G3uvP~V{%a2vz+8wYqc?E`wjoce{=(0JMWo8`| z4wtYP-&ScmkCBhM@3sYmP3+Q#`oa{fakl>+efeOVfMQ&yqB6hM+I9v%;!XF}g!2w7 zL-xgRJzX!D#M0kVaLDLBQ}V>bOf)1iNXWPu1>~%~KATA_fR%1UO3=#phY^X4(Wv@! zQ%-V{x4>8i@xt1hsj|-)^h{@|!K7HCV<-bBwUN@bnFWs=JMl{(bs)J!1hT`THyvsWH=g2R`w=0tvvVH}i&bA9sc;{+< zAe1F;MuA+xWI7gYov;)XhjW~NN3Xjtm=Op~m3(bCtXwYCJf6#Kr)omjtSgKm%P*iO zE1Of#fRdq~f2}wZmPjGy_Mk!Ngp{whu!h^s;V8|41&JkwWl*5ZJAsmHAAkH^+JNkE zMwy+YoWCMz*-yGzO@}}_apA(}>2d{bU^du=ODN{6t9m$dOFSUcCygQTb=h`PJynDF zsH;jrHs6QK<0bHH>Avd^9Jw}=$V@8iaOa(ye;Ih?eof1N%;xco2`nj9`{G8@JDJ0c z!%l5Sie#>L<6V$_&nIaIfY5umFz~T0aj!y%QTp(&fI~Qna;Qx)_;65z5T`TF+*N`t)Z3FTlT=G9G(%9eYC4A z%ZK@(yx(c|>ei?L-8`4x1s$PdTuU@y1fUe|eb%*kQOB}VC&Tfa{%h-^$G|MT9}-?1 z6kCETTHJld%9j+__`J;h9POt&JdSdLk}Nr^A8#1*&zRh&p|Khh-cem;&VgNY)ikYC zoGy@#t;0rS;Yd}y&@*y+y9M`Ej(kO1M@h2`BCX^z5(m($gXcSnR=X78&Pw)DA-(4^ zx`j#wpYy=kNzLIge>+$G>MJ!xOTM|D1*~eWzs(ULD+$|qSbK?mt|aPg!WLM+&PR{& z&xJT0I<$*N%GX%DK z-*nPX6Q^DNe(;lV;+K42I+zX)64?H7Qmg8F?HY{7Bs=$V^rgi>j}=9-8w$fsx0r)_ zXciOLjEl-QCK(MSQjuyLZ>L+>5S<$JKGQoojp}c0$F0#qTR-&n?HGvReON`C6(5)j z3bFS%-k@8~ZzRQi-Pi5}D~;Csw;z(OGFvM=wY>JLSn<7SNp`lmel;Bam}^P59zKZ_ z{vH2zz{;5SqEkU3|J#fqGT)rvpDn1zvkdJ$TYxt;G!N}r&`%{!KUOxujBM#ANwh@{ zWN)l|XpMoy-M@LJQ#UlULc3zxfQB4Zy2dPg(d2G#xF!8CQsm;K(4=FXvnRVMgD*^Z zV5Q0=LoTIu0W2ijPR?jM@oGiBm>E{7>SH=Bu3NT|EzzHVy)^?-l>(knCH zb}a&VTRu!r6_@WsmmF<=J6gThhx04XerynZp~^adffHst^e4=dlAl4#8k%|sN*3bD z0S^?0zXcXF7FW*v>s1l_6Hj-IV+<8Xa+0xRM2CK~#$smx4I-Lz@j?vOR#`&(Ij zhpU2mrb~Y&n~z=otVYqgM@WS>toZGX#5G9plz+u?I9iJrHn(@kVXNB}h>UUME z?*RC}J}q}^P6>M%Oi5!MOx6sr3$Y*w~+@0vS}6sP$({tEc_1v1R48K-ih%jeQ9GR(kA5 z%%6~`LM8Zuh__~a16j)~z3zc$I)MBL!VNLel;`uN&I%qQt-lw}^iwJ+B6<|Oe$0=2 zMGeH;$RmpqwLCj#nXP#>%w8%R^F-hlEDUZ63KlS#8p9SUstf*@azc(2K>AjIIQ~FN zT4+-bXIB#dbDTLnq0SdIuy`i6hU!{EeZ``R$pA6rF=jGiGMI!MAApHv0LX!hcv{>y;Ac41*9zj&A5rp z*OBLKwqZV%faU^|$%if4fnD!T#(02^TNz zXg*ZW@+)6>UW;kC<}Fsx)V6~>_k>}12SDp4HhVvWtR?n>XGLccM4`%i1Up#A>U|!a zHfPx&ntn95;K~eD>4PO>8->hW^|i&Z?XrzR!=a_1Voywck5IZ*>bfQb3d)x#MxlKC zdW-w3s>oVcqKI4wl*g=PQtRZ-LI`6qJdEPrVro&4xrQ$ATmb 
zua~I+*EF+tyl{;;9LDEt^W7!u=|mj)7@b;aS@j6yR)WASf=<{*-<(aTG@&4|?Wwu- zQ922*S5MGNH|-D|!kIu_Sm}acFtVBm?I)!YJT7u)*JLCugep(I?3UrhhX|3c+U5)Y z1nn*&a64-mAjGR=ZQfIW>0V-UM};wPj6sJ2N_ym_XX#uZTL1OV|633bJu$nj9|L27 zxA+%!*Efhai@f_R=Fsa07>k8?pl!W?0IDRT z*C?syLxkUC`7)%|;c}#PP&N{v!-D&3Z4snPK%H0yW`Nv?&-KF}mRx`w>M$#(>+Y5n zU?&%viBn@pJCNY_Rwif8{F)IoT1_G-2-&m6<6xqeMxK%Z(KgbOsvOP7hO-vSWE%bg(ZCvRsyrg)`Bapl`BS_h-OF>gg-33d7RWCu8X{S2nr7I!qtuC; zQs5%Q!$gPF^>`$X3N4KqZ+&cfi91bAY_t?tlh;y5t~QiDNXi7=US+~n(}UUi{y67B zdh00oR;XNYE)lTduoDDa*(AbVj!zpG6@OM%#3R<8u23aKXa0@B;#$6n9hdhTuOv3{ zY7}FU0~y| z^pL67s^%X9#@{?#PK>$4e>pR1OxM$!5ljAklWQ0{-}7Oqj^NCpZCT3^Eya6yD}{Jt z!#2~a5%ul)K0rl1Y9e=9(VO37fSi<3vP$!CNceII2rnT{M{)e{;t|Y$a(~_eSpb)Q zXAn%BM*n2|m>L)`nAayx|7u8b_GDF=O`OquJRE#|ti7*Lz%{v~ zi#~YCccKoJ6vdFhXrAY0t0eMHz=;JFpy5=0T)I{qU44caE2VmIU{!c=h*RX- z#J%OJic&x^S+q3}iwOdjqR-ZaGG&$AcL?YqH|pc&rG_uKUcqH+t;LteDUpRf3HmFK zHX`h{A`)WR-I_c4f-+P6+{^JuPzqT|$X0BYTfVP=OQC|BV(SWv7KC;B<`7r*9tOZ8 ztm}zdZ(QUYH^lI~Ekz|tYKc+n4N|=$f5p4;&k=F=^1JO{bZD??$lV+lW^wNPL#TUL zbNXn_HmTUKj-1h}A+t7c&K9sxp_}$IfQP?lj>hsBNJTm$*JJj~7~>OzU^wJUB_^-_^Wb^E+opb*-1hCRj693ZK$G;) zoAGuy;1P5rpCreSGk(+XW_Z`SwlAh!e>XzgrqMk{M$|bqVEy}*<^oC|+)r?bW zO3?N-v!YvHx8xy`nW^W3a1_Xc8wQ5`$Ryc&fG=Q_`=N!LI{90@!x|FmZ;UfXl!sr7 z={afE*p}R5W$%K^v!$-2?wMWEptd|lZR6kydf%z?rUjK^h&ifeB&m;o+&|?cn5W`H zoRdCvLkb7#?%gl<{B3odnuRT0@f2Wr*Qi3_)^FYNVfbBNHkW`u6Q3 zQA7_&PSd#sf>WxUvewGg?$327JmuW-X4Q(VtT++hHY0#1BV- z?C%96TewpHL?E$(h++Tv;1=xRCxT>CE%9#lB86Jg+UPSDExrk7S}n1W?619#yvaP@ zToyH4y+B9w!6m)K+_bIw5%_?#MP~Gv{Up=+evaz7uaa@$zBpkTQkdqEz>4X8+D->g z8unS25QvW15f7)#9~nndg%d1mIgF5n+mgM^@{oHa$=OoY?GO<$++OymTVz8+XC#1& z^TCmCyix8OK|qRz66@30K<1bWkoCDKQfOe7pN(j^bZFj`77XTejf_x*qKL6rbMA9P z9Bb3{495EnMcO%l18+&+n7)vt$W+P@$(C;koE*o2S0B`5G{<`6S1!!bg2zqO%_g7} zBu6dPh>=CdqDD+Q_?b)#yLPAvg%rV$e_V#L zhrp^FK>^G(-gtcS6_E9{k&7MI^euK`Kg;%%d6TCr5L8A93JH~ki zmLpuby{exCld_{i`a20MU z-z^n~66k)M3)Xnyk=N#IfKG82717|Kv20J?mfxz_ZsIU8_+XQ=O;3+$kBEF<{6=y% z8vrymQ`JR&RLLdWT>ts9nsO2@0Or*h&brw$bvY80){^rHSM*9K?}*Z;yg^rthFi|S zygx%$v=yg69`A&jJ`qU%Xg^RbXZyAFj^JV&~z3 z1~=TW!DBBmGC{A{;nuX*4pW?*0g_jl24b8+b%8`W>Tj%!a^bL<)5C(M-5}h{2;K-d zqm{&(ROuv(X2~)Z>^~Rjtz)nt6Yf2^CaR3_e#!?q^wR3R=PAuE&{NXP89%t&2)p4PA;#mfS zW*T25qCWka!Z8ifYemV3@i>&p}9 za7q9fx^<)a^&GI=2C+nrBPuEjY+uLX^|mdTkLQOt#A$y9u#JncoGPEiT%?0xeI|f z4l!~X^!QbRX^3Z-%oLPW{q(u*gwSCxxwHP32XAU=NPWJdiLg|QPZ7m-I8q(+hel=F zs!ijJvyace^?2BGXsDgEKPT2*Ir=O*o9m>hWG$!jAQE&Cx|!OE_1zMb6kG~zVAI4+ zm$dkk#MQ-I$|F?-M{)55y^b>UreR=bh?$HrmwVPHapaiW8t#Qw?sC(_ip>6jyd|b0v;puq4aNL&0*?BvrxgEY64BzT^z-dRj%E!G{gdE@dRN z{SqczNyg$y#_Q<`l1M~?v~S0_HVCV7bYv9>+1k?OK8)_g64W}UUqwnY<+PhiL3;W& z#;_=!3LVRhaqQMBroGio1=8-aHp1B`A;?`-I(zg5~ojbUT|XckKy&zK8l=j6!1n-{GJ#aYn|OrOf{hnZ}OND zF(NK1g&c~)^Y!vsm4s6wlH#F;(;q42gl7S#;Yfoh=@Rjf`jyrU;CCuWLbG!z`rt=cIh$)!BCKGi6YOo^ zkF4mULOW3|!tSH2*YzYi)j6BQ@d-OC6R2=y-ODg<6+LV@jNMC>D(n~+jG{`eY~lA# zEZxX36l<+@*~DbKXVicSO0VRyINnauvdVyOid6Gl^A^j#k%2j?POgAl?P7Y>_FnP) zi=elzkjaO8uHG+}a5aRLnzF#Joh^ zc&m!V)XbFzKDJ_-8fp1D7eB}U;mJ=VP;fG!L3n@-{mpafd2qTXyP?Y)m=vb!$G?wC0OGOwPW9HczM8ZVySetyqkDZS%X2@)t>NZhZbZP&m}UuG zJUVZ86dl1B#|18m(dOv*U|dC0o1Dbc>ccaQJh`xd-f>;O=W@PlEu{0oZNWx(dg8ci zC?PY0FFjjmJbZ;)SE$C?uyS0g>l)DJ87kq_Iej>pJuLec3|NW`2? 
zpyl$!_8wM#Wf|6;%FvC;NqH<*L@fRR<|wZ!)`@@b#PE=HQGEZ!noErpo}1r~CI#-1 z+30sAYKJGFUt^XsCwf1B^S7z*AL2Eky!^lRwQ->l<>uz+a$O~*YDQ)j8W(E_Jt87p zWjA23?P-d)MKgvJAj51Eb=VpH3@L|noC)nC3YV6wlPJr9T< zGW90dnJ<+I%I(Rc>UET#%Vw*vN@V;gKpSBL1u?CZ7Aa?VxbFn)Yd z)6Vsb@vLnWL!1sO+SuI3uUB&r50iXI`m1-pJpx#F^Bf%3RTnTz?QMLK=WjBLr-Ccf zJY!|m-X#d~nyjbNSyY-i%JR&u16h!rkR6wuoPA_jUJ5E7ASUDATVLYHzqczLB!0(C zCv9Dns%~!wu#F`g3mct;7A{p{%5gsg6u=~?!+d=DZ^&%uSXO~{-t~DBzT1S;cElU= zn|Q8;yHwmqj^9%$Z-x*-nMEg;0tXb8;*;g7*Jj7EK?2e&D#1qb9zkSx)p#MYlN=Wa zng^D+X>PhuU<8eIN&uc)Kt`sAwM$$bt3G9&7k$`8(@LSk(PLgVlhLx)mU1?vz|v@- z(RuA|d6*$BHOnBR*`S`1Eo7{Y46}H9t?1CIc_PtyjipJZcugZ*B*B<`wJrCryMEOM zo%z!=Xu-)P)6@REi2Y zw{WiYmWB=Q0&S=r)S{!E?j*Hq!)^XX2Aukjc6|#CQ?lM$oBls~l;5M&S>*6s>1=`( zz_<ch+XM?9f*5B3&gvi9u9#KJUDFv4*O{R~ zV_kGbZrs|5@~#tBsQ{O@`2C671{tZKh$lcSB<{O>o1*BI@eR_L5y~}mZlN-v^?e97 zhDkN~pPaEYr3`f)c>#7yDR)j2W9gz!PD4_1=t{(RX2Ek#Zz892%Zqdl@xg)X3gQW4 zBG8i9TU+;GW7B5fN+tbKz5oC2ovHmO5vTjPN%C#hWZkbi3z1zkqlH87V+0j^A zo;Q^Hhw8iX+}W!k;!-#_KJtSV#1?F4u0`%eV*Ut#(jE&6A5^m+6(4nK<2? znr9AAq89JzYL}5qLY)x#?_Eq!E}n6#+P>&n5ZV~59IeR_kW@Q-x+COSym2)TZ;|O% zGlc1+!qJj#Hl=#|>?pzCITwie4zIiy`q?^0oL2r4EmsM_3qi{O{Q4&~Q=6G>Pqj1U z3o@OVT@`kN+e9zzpRyH&z!-XvoHkY@nKl7$J2Mi%D+8}2H9hdEET+8fOo>xsw7svl z;p=`xtG^;!j+I}|8eAi*3nG`gm9YR)&qxIvH< z((Md&7#9IcSvtcID;%`(+$>~jVjFlnrNRc&|g&fA9ph9DaPr)3xxjXt-}8k2(d6S{{IHt5;ZTXH--^^10i*gF99^P zsjk~&Hc*_8LGdfL>F@wB@kqbqQpZI_^vFbdig!uxVISrODMilui(HFFDY_Hhoyp5^ zRo8|QdY{Gf>I{?r&z$@rk}Frl%~dQ;s#kI=u`y6FjNC+}8f&O$Dc8Cd2N5FbRM9MY%>n!39;?ts<&+z*RGgsg zS)D%qFLko|uHg7rodk;)NiJ&}Vg_GxQan?nHFNym>ZCQtf2oslcI$@{#g4%_^}7pu z0%?h_7H{q~Ah5t_k;tf;&M zsqf{-b8?tz4A|Z3>x!!7qL zO@Q_YObwzca?G4)Rj)CPV^{{WA)P6d2Rip zT$n7N{{|}!)y`%;_I7B4k&oko@Q-=pZy(5+9#I;!)S?sSvXosgOnn(rC<=ftNoghG zrz%O&mN``_LSI53GAd0zrQ0XmxT-o58!4KE9|Hr4L_TXr36NwFgf&vXADUh7zVk59 zZSfG<%vb0?dm(YTG~Hth(1CsxmHewyN`7MzZa*r3W{xm5gDOO}Lhn_gQmBziSkT!( zZ^H_E=t0skmLOI;o)hMJnxZA=*Xl3zve_w*BTZog)%!DNZD4U@BAfwnoyZqx-)j+Y!4n>Cj;OkZlCjws z^=syB}u zzUgWP3MkqYtaV(&1!QqNQPF)k>j}p5@M~{^%5b5H%sRToo;RmkmXV*n8p+x?*cVYZ z=(be^-lDyIOXb>I5%%_}mo~N$d5N^?$R82j>R)-XL~{ODo^1V9mvRfI)5FXKfCg8k67Kapz(LqKv%hCD+520kvmh2o=q&NoPqg;Y41_7G$Js9GncQd7I0=MfM zMTIeV2E|UT&1fq>sWIyHe*7eD2!s+m#%%_z7m(U!hy}*OIKFfK#?$tDd7A|Kcjo zW^rL4Z;QqVrLFY(&j+s3f80re3fx?Ah=fZiO@cIj_V+U2EcBA8a)V#3E7hz3gDuRT zE%V6DgWXm$EL&uJ&`}yup*)%a1|NDLO1v=8F+cNkAWL==bC|Krn8hb=WLbF;_^*zK zL=$Ew^hkbmh!J%CR)ZVdi}1%5*8a`A%$DvT%uthn=IUk?6)YjRvSE8mZp1xiRfZ98 zLwQej^WXFXqG(sC74f+l?MV~mqFw)0E%8`ek9EP-tZWv+!5+>Raexv*zg)o%4 z?^pWPqor@#hDxm`YhUYj=p`yNK$F$`*Nl@l4I=dH@S(y%Y1{Lou81+W8v$<7Kz~1; zau>*T^Ro+;61XR(PCtQsJcZ>k@{0o>>I#{ivg)%XpR1RSfikr%pRJyek}zoiV_}p# zRWWRb6p>A%*T`!MN*e-=8`bkxz9*TI_m5$vo0o{C!)}oUW2l@u-sO>iyEDJPi+Vkt z#q6i>A4WT44qbfZ-xx;Xw0-#-w(W3`gg4t>$`vxMr5{H;AXJ1OS)KZTy`H}NCp=|? 
z^Z~lD-Tw%=C_`%6g>~r(28DHH)E~J?s=~A!E)((xt4Kr3WlD6rz9T)9T&K?CJbKP< zRJNLp!9+NK)}X_e5#z%XV^nW=9dv!^{~>GKSj9(=IXyKS;EBzTiqeX;{%b#Eil+sI z2WPIK$j-C9GugXz6pvRf6zag5kx4V6ipW-=xiIhHv0nc@3`=2(IWXo7X<#e{01tKj z;Kk%geuDX(J6$~XU{X%|_BT2BhPeQOHy{ZKB|DjHR}rA)pj?P0Nq*#;55k@442gsd z!wfJ!OS~0M0lC!_!+&|@Q38xJ3lJDyj2@mFIRXqC1_oR+4t!cbrORCF#wfDuPg~1& z8fA80UbSe8DFT$-`~h(mV#k@teF&e|3Qt-4-(2h;E@EZ?qck4xFZjBv5mn?9l}R?O z71R_I&#Klh+wlO&*Q;Ny$~nF*kcJl#1b3MGBTGKdt|BvrR`AC^S2m~ArrLR%zHq8} zymwm|jy`SMJ`?q_imkZX$2F~oN(jrU;PHf}$BCOM5kHSNrS?f)p^nk^GT@aFn1`vo zC~9PRFjcDl*gPCcU8b*Jxad?b-h9)ldcWbT-r7I-L)*TzZmm@4?JSmlQVdKQ-+gE_ zR{bo!Qn~V!DukSkT>KC;^+cbYhqi-7z(VHJ?md*BUC3M@<9h7e?Bs|scBCFrARVX- z^4l=AXFG{VX33em6yi9JAP+WpjkpnRbA;HNJ%1L3&Dw4=3`6a54KnIyB6Jyj#Qv$A zne>o;y^g!jf}=ET6Z@#PJkzU*W@tgem1 zTZc3vcG$}E9h(x7!=N!{A^_Ynyzg?d(+41Z=oO?7QbRWCjqm{b!T)$)1%@rYfy}N5 zORsq-%$sNvNXf%RnIS#zOlS|-o5qNLZt;KJbX_P6&**)nRej~Sa_PYwq<#Ma*ZD0+ z=n;5Joh6jzT*ywCEI3~%ZTLJQc&y9Fr*rUPSvG54w``=7?oQmzJ$Qua#4JF_?4p1d zKlrpny8{b`A`H7w~Qrb!N#gOJ zSUCY-xcqbnQZ160%%w+Nk-J%A^o^BrNv*h$=QgC;ol0b(&sLOB;}S0=wQBUG^#<6hUs`vXdok zCQ4WH6B<{-j|z$SRfc^p$&~AT*Kg1)meK8>NbBX&FeDQc&nk!t0uCr#jDsVUXh17F zCn%DRF$zxd{V_6szElE=6Uh^+*(GHJWW?a2QmM$oo9ttAo~Bot4U4#5Zx;S~SnQ2r zTed)v6$R89;X3E6!@o=C_%C9BTaF47|4f^?5=2r_E#L3;?dTleFz$BtA zCFWNEj$?$)d0f%2=loM~p_JjN43RN@6K_Lxy#oS2 z$o=8l;#RE5_V)cP5x>Ss#vfM588zk&n1ZnS zu(wUVW1U=P_(=-z?pOTqL|bxGm~(g~>^kW!uIBsQi&fO~WviV;2T7iu0B2m1GdxE% zJeLZfbViZRUSE=Z3fGjbWYZR+^HFs`rliw>ZTZ(%u$Q>PUczTA7xgLSWI9c)ArXFs z_%8L;)_Pnhr(K_JKy~}{+@FTJ6*t6t`I{`aSJffh^hXDmI_-&mklHqm?tcQ|ABXo; zh05(z)L@`=qSpx4jVZp@WED3Kb54HAF*=*1Y7SZ(3$~dQZD3G^-(#5`x~#4o{KGyU z?8(ANc_}zEo&lp-^K;a*3WmYsYI{Ng#(Jj*zch2`^A1N;K(4`LgqJz=+Qw@>uei~r zwjIT}%;8+DsX*bXu(4?eV1J%Oc5*D6F#g17E_73y7f>>12x)1{L3RP;ba9a+v~$+r`W5EAqnI!`r% zUlpNkMk?Ax*HB)%^yv?dN2YYeWF^VE^s1nwWL69yp@(8=c{eG*mbKKb_%ODZKAD^} z&+Ew4Leq{NccUYHvYfjyc&% zYSkeH#Nf?quXfRcVj zA=vIhSZkIORg+CbsS}==dpOHsns|+brO5sua~u&!_c$dy%b|ASJckJ!63x+clS@Ba z5pJQBM!-OU^~%CozBA6jvR5IUoM(iBVBBInSDj;$s8%sXkR>CXU>9AkL0{C22Yz{v zIBtu>OiHNJmJluvH@P#>X7XTD1n3W7>Q+^mb7mBq`7}A4&a#Lt!AhZN@I}GgWI=si z0cERYHQhKWZt$VTUiekBCXi<4Mq1z!2;5Q>C!5Ror?E|W;iVS>oDrAP07!sbL3Yny zxPZFh_zM^2%a}t60DOZ=8v&hv;lg$Pw50IbfN;V?5FVRWfy8N|MiAg;Z!AgL zvNfJC^0EioFI$N1HEh)fY;6!Np&1Yy86_+nk(MJ~S_(SkXB2K}4y^M_V678S2iz*o z)f;IJIf(@_L4Pcj`^podXPBXTl#z$KI7 zij9L?I5~6AfenMx_!mS;jqRZ?6+qPMwS!5LkJl?U2307r4*bKP{*8Fdkw#vni*gQe zgg8P0J_f?EXPo$rN0tT(VgEbJ^+w*5C(A?#0KY0sd%;n8AMlDn-jxZ* z>2EJ$zJLU)7ELTV5=W0xAPY}igumX6Q&oaT?Xu-cLzoKse-u-e9s;2WpIknr5ebty z`Xp1zhccsgT8Wu>mDBh#iOe)vd@Z)6_Xc^d&zR3*!*LBjE4M2UKJoAmEm&Zh;O~FmF4Xh zt487;3l||;33_OC|Ld4C{*F_;CS8a*aWqEP6=)RK4usK%Hly&_tBQP3IntWaTAxCq zpXUAhERuOuznF!rM`j!R-7jIf{6!0Y6JAylTK?mho_lZzjHx^5I}9kZ?hB#vHk_x| zFK>aA!iaYjbMC&|$9ru$O>uU8^YLpc+zHyQb^@fAF@XaJ8S6%}j`3NXGr|Li9FJ#% z&;4OR*P2jx@Ebc3)|qNgdvDCP!E#nSe9OHfHyktiuX z->`Y-Tlrzk%kQKM-+Drw3z70;i(?b?XqLY*BjA<`r!Y^E$RL_a4u7`?yGISXR?Ky( zKNfIq+o)1|&}dn=O&w_LB`ezpq_O~4?H5mEx#A#A>2PTh9V>;sASe;8@@&h#e(QOb zsrYtKr=6#?{~M&CO+FdX+J35*QvVLV950b9vtfN4{eFFGx6ybbkAtOzp*i)a??xUH zBZe=5;j}2gQ$gu{EXGBI*L17DQ31a(<em?xe4CHlv|3F z|F$cZofuG7B%PJaV5*?_w$)H$(QGH9FbQ%$;uAXhL|0;YNSZR>PAEiWtYN<2s78nd zal7l7Q;V6U{qgi}SEb2pCU1~nv>6XBBH)ykF0ZSJ}ahvg4S7ic5*r1o~OHuvEUzYb2X zq*r$F5h5%aq=*ARi0qtD$!6*3*2|pJ+py#kP9Wm8jaOJ#NHF}UN}K-GsdL2%2!fM& zUjDf6H#zr$6L_@oB?IMAo;G!J)&l%-!|Nl@_o>rY<2_%U-+;Woso=G49D@vBD~w|C z>eUyqh}Gkt)Z90M&^vkdtVZ^Icl9Fd#k12q#Y^l#mOhw?5g0G|H)>-A19(K%+99a# zMvra8oHraNZV17s8Qf-@Tu%ViF7x7JQD~zU{!$UZ2a>~g0D~WsPA#G6b}|I9d5soE zIcP5#b!v)r#vG3yp#ZyhqYtGs^D?qQQc%=D_N5FDFG75)j%EYJole6gri`pLweFx% 
zpqy>gaio3}$T@b$v=2UM^vx=r+?Z5@Too}gQ*%Z50?aK)Wlhgxq1={Q?wK&u2F>zM z#!u!vsnEO=_n!U6H%!fuyVbgS(xlR%?`5c0S5a6IgFcWa-dMJMvR7j1NzEOq5UUrA z$b>|1uD*h@|MtF!m)?n>d?w6QCtp3mvy?PL#3rR$+qZja_B5LaTxZeiHnCV=U zx-^1aVDkFgy=U>4diWbH9vpnIM2V2>O22BS3?)^-go>Pz*RGAjT&UyF9d=3>#V3V) za2yH2vAd;;mWpK|&wY$_|MWf$41@7oG0ZXoUqRAXLs0^SI|BXMpL0CM0Y!UMan9Rk z&10UDsn4W2Yo75se3J>CzB)z%8I^Q%tgJ`&M{68H^2A(cddYW#x_&}$Z&T--;%gUd zgoScECN)$0%^CYWPNxSH8qpjoCScF#22uK#gQHi?nJ2$VkfYZRS&kSXf6-qv#i_|Ys zZHWx@V1qKGNE(0{iv2r#Mce&bw)_af&NS~^+t5nHKFv`wCBMHM)5wo zZRHexJsWsrF1<(`R8F1(B}qm{WdKiD-SR73wVO={2W4*-xxY}}sZg?!xRR8Nrd9Ns z0Tjr_H@19mBQQF42ckeZ(jRrk@pq<-&5#Vja8o9kwP6^&n%B_dO>VoSUptF1SPkE} zk{U!h5vdl*e1NW1hJBzt9cAHr=A$@v@T0H;<`~F-8lfaq;3`#lL_(Lu+Iw8rK$mp0 zYbp70?uHYaycRymMQPPXo5Y7j7%XbSlawNGjKzc(pf6WH;X*)_i9fpmYQYRG){(XL?q^9ZHEx%SEb{4Z zvjtrCJ+`>tl^K)If=FEF1X6L-$!(OO7BQ$wvIlXG2vT-F9~WTHvlJFK>JIHS?Co6( zuFeP}U$4bJ07NTf4dZp!`g_-JU%%0P5oc0*nG@&Yf?kz(bVCgS$HbaesNMQ;)1ZfP zEjD#+H{XhCN4l)PiRSK)BV@<|;qRJ!TVKXr(fR{9N29~|^C(N;)OV+oPer!+$1A;U zoP3;H_62-+SrBYK`ET)%*JsAFW1L+3RiA_(V8I`X*u33L56yW-KMx5OJ>FGmI8(OB zz4f_12}%N75Hbi&dWEyTpEV26#H&993PH-{+e6)FmBo%ixOJ1^f`u2ei>s3C8Y|bR zSDeFJe!BvjHR~Gm<+EFqc!wcDL3bbsPK^?ABz%E)5Jx95Z46GR&Cge2))GEEo=cN4 zXlz9|5SL$ZIk-OS1ugb!U%_uNsjAyXmKMGhRGSA_3T?gIbL}gfIgfkPYmEy&oc^Fd z5=Ah)KF@3U)@c5z27?YoRuAMkh=z}?d9{HpW!!{*-mR0{;MkVEuy@;VkzSFWQGAjG zmmsA)fE<$TtJy&OoD?cRAvGX`@Dp0;o&qC=fv*_3$wEhik$UYM$#I)NuM9=R ziV`}!CdLkb8(o2ONgKv#2FwVY-~{RxWvgQ2>bVXBWi}=TGw10AshJO^4ROnsek0)K z+|fhS*xl;289uCMI4V`8tHNL?GEk5I7QtQvd&N}Cln%F_sBks~o8;89cC#0$HYP~6 zqBQGW;UJVugXYu4$yZ~9R=~O!%LVajf5g!drT-NUYaJE2fyR)UO70?_9m{jNp@;mf zQ+o!@+kMhk0^Q2l<`Hjo!YvmYLAKpFhp_jRuZJ!vUw6?^4|>ytzV31bw^-9}1pZ>9 zjCLKlu{{O9!HojmgF|Qu|M{csQb@nuUCvsrdn&7u%XCfkJ@Z|Cn(4(;Qv229`$#Uq zN!rsgmeqn!@sVf+*fNK5#8J&82$Uh;88s>cavtsSTDd6v;*LFRN?KubnS+f*6wt={ zB!?pcmsE$7KF(`oH%3Uqi?DSkBfJz6DOe#EFcl`AXm0DV_u zOFS0eOH+d-=%2e*v)D$%DxPhG!r5$9TfdmSgP@PX*g!(XazRF;PE$q2-LMn71^Taq zVKQLm&h#G5XyN+@oBaFtVJiZYx%86)(~D%--G4?*4P_3q6mLoy6qt)i>LRK8YC;j;XIoCtRv3ZGoQ#!+G0^PdfolT)1U+eITxF1**B zBl!sAj5a;YqMFK%49Q({#KhuEUh+kU5Fx3}88sCgg&0paGuj$?Tx&Hk-zdI4e&4zu z4X_F3x`_S=xX_U^{uT-iZ^27v|D-YS{9wEZCwe|P9Q_fIBJASfn=dagx+gS!kSCe} zXT|~;Z*n-ecH#9aSvU0$ZTL)$CS1kX(bulz4!@}zLQ^~h1gy{?3|-(~@GqH=I26iskK_A(8^GBO@tWV~V6VEzAJR&Ys$1T{a$qavy;rVA+0kR#c1?H~ zn9auP9xh*FeTE5Vo*NkpH|2zGhCqkYgzjz>CCCU}oPW=39AKS=N2t(tX;zX<& zU2%bEfk?NQq$UwfMz`h1e?W3y#bUw>Wxq-RSJ`lc8ACBzRZ8lP}Sq@a;<7%j9rc>2x zulNjXn2kwlyu9$Hcwf93;pzHhu@ADoQP`m+Z!***TpWE+M=a6iQKGI%;WMhf5 zV`Ey^tVhWQa6=-32}igy0fjA~J+f@F(J_@y9ioy1_v)j_C7z5Xhb|ny9`CVsn%;V8 zp&0E`?wv4>M$SBR-l%IA=V-21zG<1wm)hh}<8*$VgnWBVT&c8EULYMUzxuv0|2y#5 zXxenClhTLxjG3~r2b8*_aEaukOmPzbZskezU^qLbYfhq$(p=GeLP*$j3iY0h( z@RZa8c{Mw9&;`+;tqD4YY!D%0Z|dvSGdPt^n(5S%C&JoDTd&f`N_Bao9?Rw^OE3GX zmz~6Y5i2=B+E_FIScMy(?g3l&D*?qI})1$#m63TN6wwY{p$QzX(CY z>VkB%z)$&2MMypfA=F4ps+p?^nkxA90?jwJpv@@7u$p1uGvEeHvX0)>jztRmg4$ot znVs$&ZahvH$%OkjOYp&P?xXmT!J>#(U%vq@f(G?$q)^w zZXlchbTf+;#bbgc*pKhd^ifot7CB&hUvp9O_$= zbB0C{fVxB;5{e8l1(99qUdELCSupkk3g{y$uc3y$+qWtuc-?p;h??Iq$6#1|ST!x$ z5YTY0gF6$+w4b1EHSKIo;7$34 zj++X+Q_Wq2H-sLxcIZoIupOVm%Mc-lg5VdotIGV*m#aK@GAAQ_cI8%v(7!HM`D-vD z&nr))#S7Pga#J+>!i)1<;;Aw5_OO#37y{A+uwvV$(9iJGbjc7!GOQa%sL?|fHOoGw z*Foe_1nP1kekr#Y1|*{84JPzRG@Go4P}G7u0+$d*>foEV{P3N97&v^`2Sq8GEoANB zDwphRlI4Uyb=pw-K?g$_e?IOHL3iZAAByUY_;Iu)*pX5>GBfJBlCn&)tUdxwtB73; zfj=+5$8PW#^+&|9hX4TFN5nChlOzzV1w@G=!eE!;=~5V(52qP^E(>V8D*AM30pETX ze>E2w?r9~J{W+fMeazmC6d*2j6l#ryUC-2Hfh@Putew+ubu0t4GO0b88wqc9NzG)LB|~&s=Hljhy)t$9@Q!nI=7fV9!O~S3O$065 z4@1LbIU!jpi)r(t;}Ej*`(A-&)Ao8-J<40E$J!)7c`S2v8}Z4mI>m62z-fi^*28I@ 
z(09f0#gdD<6NYJTCf9T}TJqJzI>_wU_SsqKPhgUMKD*vW`<-YU;pqdQd0TlX&`5W1 zkSR@R^(Bkeic_9fPns%_Xc0|ScUvD(COt3{*iq9KiGjtmzH{Z9ihQCL>2+WD2_66a zihdD)|0`ot1GCAFW-o*$!zh?0aF?z*6X5u)VY%=yUc6ApnWCU%SFlmq>_q~eB64on zmTdCACS$JgHE-eL3&!&Bg46e|MWJFV!t( zv3H79id7Wcjg&9MvfE(#?v$rz~!d462dxI@D{m5a->Ht;x zv~BCW3RfvdI)EJ17OL-mwj%Z{{0UexW!Qe;n%7&eW`Uz=Prx__GjQKB_9X^{sng3l z-^-N?7opc-TY6k$+Lgxpm)k{hed&SH^48+MFV1!7H+1|6IAJ8}_~0P@gE;wh9>Oe_ zAOTqbzg>pkO*~H~7X(4?tKJd-L?thyfDisCRW^vqzpJ?ARnWhalh|0Psk8txAgRqf zt=c^w3jPKzA)Xiv<2<333vUm6&W!SqWB3xXRx9zP=|cB`o3Q}>Zm?GmOT}dlaOU6Q z`do^MaE3==OcC#(Sg>?At~#myqNxuCTIL&;%ItA}=MDfg7F8Meh{>Tjg%E5(YQJ3O z(FYex2D*aTk+fJ`_7ZP8x6b*K;u}ANkmn}^BbKnT65by;A0ysaZTMn2{s{7V_fmQ- z`hIrj)8^!Sg?p4=Y9OeP3T8=Yy+q%#Dt$~L?55bok&659(!dRyp$6mi^J%`XlmH`^ zfl!#J(pgij%Q@JJ%js;+JDbfsN=i;#BnZ9i$Qj}3g^Jf7oze5GM4(DlQxs>GJi%?Z1}6H3#yv6&;y&)Zf0<`mpKf~#U4siz$w zeDe{L8?W1uO}0$NwFvvLqA6A+1P!o!ZZx&3!Ho63e%dviM6f{ZMcfG-BValR3@D}{ zyX=R4zff>r;b)x*oCfqM@ApC*9s4S8b4-M?`miWI8x!ihzMS9R2+u)M^WX&W>UR0@ zF31o5P}na94bxHzNGRBak+e_nVI69b)%m$e_$Y6AIYo9MQ@s3BI1m=|C%b|ug179$ z!;oyZ8%eDF82o53ryU7koAJuXRY#_vf9*qRg@%*)PZM^j*FYub9Oql zJUXdOiP7ndQ$tCkJ_8EhtFjcH=0m%gxz5#Rzkj%=%tDd$qr_@?`FT>mj({;48xS?QfG40hyFjm7Z<(=?4IMdD9vem7#iKxh@ z%o=sylU7@27elst@SZ=JeV_d7j2+W^4>@7Yi9HbO|I*A7I+;BiEH)PKMNu6ikoZpU zRSG!_9h_5^82<57tw1QbR;NN5pZ%O=dEEna&w(!3`(87%5jG4iXgPr)0exXLfs90w zpUW-C8Vy(%BN>pVAxFX48Jb8STTPnW-WSounLMHDf`?Th5YIx>mFX+_0EYZj&a`g` z&)nbDm=VJKT|Y~YPe-)b&>X_^yPkvTGb)5q?@1j$rFvbRo?>urxHM&D8jj>3REhC( zg?my|+Sk$u>YtIcRkBx`48fO2%whz^N6j>w4&_Kr3_pK3p77b%9MUmGtJ)1s7R=rsw=}c=*H>Q?H_W zZspg7@b{iIEx2m%a{IVbH89mjR$q@c*5fB3}~Yf`|0)oN@S z_?5*fR6N83S`x7rD>{EclF*IImS zR8M%%1jVeU1{gf5ms1P@_$|$sgt%E*j2Cn0 z$};{PyOAfZ_$=19M^Tg~3Zu9Bva(0Wkk0wdo9CX7^};(iI#3>)Qq{lNfie9@QLO;< ztn74(j`|L+Hb!(xAAcAG|JD1zLhX|h^591bXm>Yejy^N2II}YmX<~UL8&GF;AshZK zkr?4v9K$cj;LBQwh={`eem}^eLa4(C!^x#`z)%tki%dg8B|me^8|cifc6?8N0i?8D zzaP8=r*Z_JYc^VW-g@3|U^ZLC_)5_xW3jKYuX304U@$4&kW@Z z9I@SlUgyMOlEf=VqeIY&#BjEK%d0NcFvd?79b6S2B<>3e46veqrQ9-C98Fzd8Xsnx z)z_`VMlXboP=p-70L`86GPV5n1^w(J6e=Nsm7ttqda- z)cQ^4j?}u}Pe1*g;aP8igyG?=F*(xQ-I{K@y6mmaP_A?39>BtUcaB$r9l6Qwinofnl745KD<;$TzjwMdP0^hR2p_O^0`u8E!4g7V^yhCuEPXu311v{?zMi7;geEP&;T4s#a&Mh{TXl}t0$l>od6)?gpU=Ip-f5ua43 zh3w%jd89+6Y4`3_s9l{wXW^at1k*8Rb39kP0^V%}2p9u!i%d3Md>#d2uFu1u5lUW5 zYjdk5{5z|9+oDlY-S!I2>EXwiDaU%O*x9ZuNCy%%nknv2fOT`Q^buMXFcJ~kJBAte z$hV=QcGY-%s8@E+ID_^d|$Jd4dFlV1m{cQ(R`qDfNhdb zi6u_PGMp2nn`9Dk>2U-W+32D`2zTe0UQ$uX&N(E{YbbM+Lgy zyvTpBqx@(ecd_zD3#DpIX$TpCBIO`4QJs@AV8s%RP&jf{KTEt~G(5ch(J-sUkh0>$ z>51XI|Jf-UtS0gvs-qo0xX`dREpoc=k2&oc8wzqf*poK+ayPzPMzXH@U-UWgxS z;ku~oP()xPh8L;yS)SVu=m__>;?hw2-Cr<~ydo>7ZdZ$#7FTM+n-*8{B~R1WMd|Em zX^{e%V^`)J4X&l(_gQ_f3bG~mtKfXygP-zIqh3eLGh9c-6vC87bQ%iL*sVWZBbTW* zN%igkmT)4{vo6P0dC%8h&mL#IEa=w<$;c&!fL%u?Un3<%D2qFlmJ-t96Gx4bcM~o* zkqs|K&{7($I0P5CErwOCN=r0%9&dWeQBnsJlSeowo$MncKM@QD`USi!!C@+kw<(3} zTGvfYnUNbwf}f(SvvEm0b2CpZDXNM&QYca2AkG4J;}9=`5iaG!O*6hH&2;&B?rUKw z3^CD}hhxd|%&ft>L;(yAnxnK;s_z7-CtutRN(6oKjYTFc&?o$-&z>j2_w#WQswBA= zRsyAI9`T3;5q+{*;WXd@#RPDsIIy|}iI#jAa>#qp8i?{}9o)fgy zfrVSsu9?}?@QbXs9l_Op$;NkHD4Z&ZzU@h+stW#$^2L&6qy?>qD#>v z;=_Ir0fZ10;#9nH1sMqk%g*$7G>W3%5dLdqf?CxiwY~Z+FA}5*lot0^ZX^wsKGMn-<#Dd2Xd#gl|oiu*ZMwSs)rOqsv z_HTml#RIm@LoLW@9fD!t*yo)wGf*S=8T$ZI=4;jfr4r8Kg848V0grhp21eyztKT`{ z9aZ#D3BxWWM31vh1gsM`N8NO!-_Mir?tv7B$U@*%@P1~(@xV{C5Ait`;Fl|=6HXi? 
z)rtU|*MS_O8zp`CKtfPV7;M4@EIrf8jeghGF^_V*PEO5-NXjvN(1w@YjM^6rVjvYap(=i$=_BG_hL=*=#Mf}EL^qlXkh-U6C$@h@8=`B#Ak*dM^l{V zyk%`UV^PK8?0sLnTIAcIoof47bAo$NTid2sL0Czw$)jpv&)6yMBl5G-`iuxneiD#S& z+qV=V7Yl@>_ON}b21O|)sgEBu{psULiL}x%ji&u4IEn+X0+tBzM&k*h&FS{k`gPai9%8^*9g0Kv#o zxDKC^9I;J#GzRYhYvt`djyJ-xmY<^_K60KS*sOr_Gy4kPRyW*Z7#*FZ)?Le8_vX+CmZA}SIYl+5Ji(kBXUAG}6i z!Yjwe{s?PsEAr>`3qj9`e~cf=?qD~NxKWF!W~~})A`cat=~r>&3(fY|Fcr`zS{#$G*9R`kG!y&&;IGbic40}aF7;InI&7-W zO(!i7=Pxu&w+tcurJ=_ZPy@hQJvWS=GjIlV0Rwk2!VK-YpJL}61GM`}p~rMJ%OEt5 zaeKUe!hx&(6bo~W!m&ZER|`;MmIwaoOXm^lNkJ9oHIR!SvRhR7`Kd6147ijR-4|1@ zkT?Fwd&80_(&pObPLR>*GyiKLlFAo?pKFp=7Dpt@WD`boM>=lDjTgbX5|Bk4bE+Ib zW~s!*^XTIN4lP`zgvIHV(O(_yAR34C6w^!P4rNf3*C{&t=ao!Im7vKQc7ub=2@hWY zI*)lVnw9DMg!JX%baXGTUXzbbeN5siH?#`rDGPh0*q+9=6J7@s33wb%b#iRB8fmLN z0G7N9Q$!w-%P)i&g0DI-lSuJ>brNTY!#yrip7(pEjYid>T-g1&eWJ|qc~o_%ddTB- zcT<}1EYv~U*eNeSg8Z~0179a<(ilu?P$M>YIv$D0S4by*Di>=!vWUW8qA}6oL3r@hm2Qd6#)!sYtTuSU^fq$@vc|k#!0lmFmogdG5 zC!3Ol@|qXC&0zRWbF0gWB=Qq*qg*K*yv9Bz&C*NF4?^`!k92i*9@L=Q01_d2`5q>)QZ3)DzzUzMS(=rFIan7?G-HsI*1l!dOPyR_|lkh9)L) z0Cx0TgZZ6!yqK*Gm>?I9H;)_R{;wEZ3n-`8=#i6_$9}|#pPMKd&m7hu{lH%Vv?_Ga zhl2EBRXvrxava2-y7vLA=P+A-)!nowU<5=I?mNbjgQh)C9n(MZ1lT~H#1x~6Ru1>m z-Q?2wkB%I7gK8B@la)a=!sqIts*)TSRY{0?cuB@@T4zjpuuDzi7j#()#s)V5ffTIg zi4B=wE^XZ6Oo(w?hKe7$BzVR%BrKm9wvixv@f>0liFk$*Yd7JpPj8yT$2-!!OWzOh zD&C+A9KZDWTJQor@M&<$fIZ1gj(a?5yl|Ik9U`}_#0L{nhw#Er5kjc#USc7l#}oLT-Ij{-Y}R;9{Os{m8;9{ z+3c&okqP(iC3C`<&$gwqStf{~XMkGV*$}{uC7e=7)#vmcaJ-n)5qbk~gvMluJ4)8U zBtjr3OyJF0!BiVTBPKrG=yQb#3KD`TjPL!%)eMN8j-3XX-87_ZIEvUHkkyAFI~>W) z2S~3)u3tAoKa4yB<8AKFI}uf6<5PKd%C!@4vdQ^R!j`#p`eRd)-{Qw}&6kS0@{TiS zY)qr858y>bvmK7Jvxz*pK}&4{xW4rhdAt*C^g-NBkHa+{w=`YjP{4z1u-a$;1U>q` z8pEuxiHb*&v*M%?c;#mt53{q_x{_~UDi4v;Uel@P|YmenBklHMN^z%vw2;1NJ= zui^j;1frXXd(&&S2S%uAMxbJ+urW)f)q`479UUu-%BLUf^hfzshEyMkoL}<|v}bJS;fo9`PWy^i*>=5%1^Rr%KG6fNn4 zL@QLIBs3F}so06tv|9ebaY4@Zd7kJO{P#gPQGKLOqT8+~(76u^l@oOf)O+tb`2wBL zd9{{xp5y!RA6eCM92^IVcO`f#=JUI#R4HbXnI?$Y&8le55}EGbpuOHov=!T#U2xBk z6Lyoj!yU}6O!&=8YpNJ8l7a`=MQc1HNj zw2ZV2GzRp{O!xpsS|(OTRz@~zd`5bDb~<_nItDfx1^@>=8wWiz{@*`@JkTFcTy}=W z9121re<%3yA2*??gM$qR9i6kYGp#cdt+kyA9RoZ22O35?Mn;+s2pW4=D+fIn8Y_FE zzajYt9w8%p13NPt2QzCc{NH%>^sOBoxCse=6Z+@pZ#^?K_$M73M>~r@s2Cd18Ce)v z8d*8m(=pI8(EStnM_VpK0}f+rJ4-zW9z7cy3o`?~-&E*q42`+y{;c|w&~JDHJ0m>@ zYr8-C`A_nHbXd{I_OGSCDg5Tx!i-1U%E8Fuw@huU?fxLbMfdlbKe7J?|D*J;i2h*Z zf2aDJ7Y8#33!}f%{DUCBg~MM<|K#MiN;oX^tW3BGU1$uAjP)EX90>o$UQY$SkerYt zJ`KKbu=?H`akvKuWbC$kH2vIx3>SH z-+wUuM<@QJ{QpnyANKuE#ar3a{$bs;2G*8zE_yZ}N~iy;(m$X%1b=JMNa*92hmoFv zk%pd`hJiteftdrq#=*d@!A199fd14H*FS5l?UbyoEqFxi^el~}^~^sc!az?;Ps{Kx zjsFhvUs_9B8=4ur{%^!tIG8_%5(5=I0|!0*-;n$Zp9>Hnb|v-Sy+4sko`k`^!VheW>$vQ z&h`{szlY+V@c*}2|Inng^uK5RABJOT`B(n`Xu=_6ZQ%HOMiCd{ar_tr8~}O-V?#C; zHX1hlk2#T<*^q%oU*EuxhQ-j31z@OWqz_4VVo-9$A?g|AF^k+WsHq^f$$SOwJ!~2D*Q~ z8vZfz{yC%lKY94G!~ajv{|)K?5czKj_kYCoKjQjtN#MT){6E(9KjQjtN#MT){6E(9 ze@$G_e{L>~tUh*>&L3OHf3tgKWCQ%Uef`G{6`$$%o>qZQ=3^mjkIzK^`$B>Oove+K z6~Dpn4V5M%+us00%pA1oWE~wW%&dL`{*et%flf}(#OPz8Yee_4tN2IdABFb#jBJ0y z_1j&BPVi$hY+-G}!vpuh;qMC%jBNi=>UCDYzl8Y^3(ZrKj{d>`+df%&X&W}jEQ3w9 zJ>(Y?!v{iX%ZHu)U6`uA^+#*h=E3qcu(O8Q09`dsr-=!CB`>3=H?|ITaPO~_kRun) zxhX8+mvzK_{KygrOQh#yf|+}Qz7#4P*t^nb@wRnE=NPHbc3wDr8}1fx-Gf< zY{7m#5-@Oyk_g4qnHZs1+J)P*Of912^@WH`6l5#CcQ_V4ci;JiK}WxDTl=l@*90Vp zNg`mtSIaAEUXJ&U2G358I=l-7H?m66>1@DWG_LG-QIa6vcD zv;B+Zz3y|fzYG&P5fo~;=g(vo9F10KtGAY$QZw}IL%vDo8`bopJ|3Zc^x_O` zC}bu{!Y0<}56yA5ZI9H2qH0kbZRzGhgDYn2Yhv7`%t({xt3gvr2xE5^ci8|#c$gsU z)?3yL1pcJyu6R8ZC#A6PD5i*z@WK^Z0sZZcTPwN=^+<$zq0*E%{5zCY7z4;-O)R7; 
zAm+->#!hc(AK$PER45Cm(gwnY`Z56}3+{d#WiMM?t~FVIXdh3ko;r@iTDZKR5G70o zfb|PbeRGS$;HI!T132~_IBYl*X<)sKnn#)5NSIcluqvO=6RmpYv=={SIB}xH-4m6-!zR+jT8If zZ&36=GMg;_eiXC+Ig0j|)J3Dl|b-cro2`PHpCYrEk^+qOOEO*gf z91IV%Da#DEL>3xM52Bx0kaFiWEpDWdQ^WN%OpLKTKbH}V%`n+lO4o`f$RcvDo}$%# z0b?qmGdhPOgVsZ)%5s#*RBfy&2{<+IM8dQ_oJVmQt3wOZ+*-t7y-HzT3m*}IQzo<= zU~ojVMbs}3njAb)HrN$k0C5APu-N5c*})HTQG@mwEl)-uO!|IWBH<{>i$)fi2jG^V z6=5UHo6_(0l^-k8&L6UDNYP1k)6SA#T}joR0fl@KU|d)KA}!Y^1ZqiXn7g+IPaGG@ z&y!e0p@GlxiP?Bzoh0@`O*D~TYB_E!HMIaLR!A#hr}nE(URLU`ntvBrnf0(vQ2h+W za#cCCi3b05TyVtfmk=5{)t!M;qOmC5>-%{eDq}SG2=Zq$1vz@AODCOhE9!ovjnRxZ zC{Cxjt?%s{0uSY&llzVE*!_J(p!n>B_+LdqUBt91!SL1ROMT>5x!j?Q2DJeWv03*o zz7v=3aPWY_&a)&h4d+3Yo-p6j%znL*ND~@V)-mo$Sfvs0>z;Ma=Rgz$?**9J!FY!T z7@^I*?%DbU_gXM^ybu+}3Jjx?Ve{-~k2^wDYy<^Cc~Jjv2(#~=cOavnm7HKf`(C{6 zbIvp9k1o_+7~_vnH#F49W6oo7!hK-=uwj$D_O0ZNs!kSeWOYA|iNLQx&z4Kw zC>&3bjluUROC9t-qQqq6H5K{NvUinp43sv1 ze8=-~&UMQ3I(M7jgY|1{Bvi3YNGL0f2JX8A%Ck!SRY9vV1rq|!%bZe_v!#Lzm-g^u zcbE|C^#1lh0Jp|^?<8e)$1~gS*O(nWUc1|r(;^e;kcl(eNTItgNzycy=RE*X z+%L(pcT>6(o$~&x7blCijbb^v3Ijsp_*)I%2Fuzzu0Vbgpxg@kkhCkMrhbzcP91Xq z)^L>1?=CJUZFaFlS30MN%V~s8o#(7XmD#580>b3R73~??^TcB_SA)i*G~ypn5+fVH zES=p~2|;2jLk5)|3!JW300@CXLxHH42XTK<>ixvmE&=q3-s}}r2hDVVKjzERr92r; zBTluw-gdR!whL>?t{+D5pxk}!cqL4{&nGQ_oyS8O2t(O+1M9(JJ<=To7_@aSj-30DPN&TQ4~3&zQ3`dv&zzeSrQhZb zgl@>VKUmL+l}B2MURU6Ay^q`HzE5DHvI(UHNEAUiUrnXuYGTsiuLl>>S=^8al*#J- zpLVejR*s1c!yBYJDBi))fiiK4Q2wp0|3_ZOENp*S{AejjRuHp8tsPK!_?lo~U9%6Q z=j1I=AJgo=rA2k6-WF(*7#R^SfgxslhO-g zsc2X|HP<|0+9L2QWL^W&j93?W)miLZJY_6%MOeIh`%W*_nhk{p`Umf%`66U$Lw-q zkBZ`~g@FPNpuMb*MPMsV*zp~^eidE#ZBCQ7U*k(c>$pkb)tNwXtSc{`-aO%Ui5Pxi zj~skHK&yHbm=SKxWdj;MxQaYr`EKdP9sX&g{*x!kmrZa1DpsTxGHT&vu0uw(%D^aV zLJWyrQ`%{jCjo?QWhHvxFxx4i2)L4G{!2h#LiGGIywQ|U86T~7bd1yXWx6U?Z_h|o1S8s!%Dk_`ovkhAWK3j-f5IiDXUUv ze3_)?OF65b7G;@ZthF6p`~J?;_NF6Xy4nTwLY;G``Ek>^w4Mm--y#2D_}bu?_c*I66MNfcI8^Cb)4J)7%IS_4?0);Q z(1V0=x?`7$oHIvHM;w0So(JQ;blF8W%-TEzgOyAd308yCviXj>aBLqwu+SAxlaUm1 zw$@|9xayDwbwbTCZS6@?6**vz+?bz0psMbOb~u3BFYD_Q`qO+zvuMq$QU%kFHN(~N zyiff7R{RpVrJC^_GEU504uG>+CMQ03fdss|skV*cz9`eWdr0fPRr`sf^Jhik7Ue`j zpGryD*5*%r1Ub8Kx|zx)WYaw4>bEd?31xFl@|1(kT+^R8IH$$NmpLXw$Jekp6IKo1 zyO~58`CxT@>4MO9gI7k)Q%o?b7xV6^kwdXH)6x zQ(X_QKu6<}&fEw#(Wtp>HnI?`>DO)U;0TycGqh>G@MV!e3Cghc8g5~DCa@dtjaq<@ppKaC#wtGq9brlpQ%iDt=TlE@ z$=JUZGk2E);=cEey6wTQx#Ng(o=p|JB@il{5!=>5VIAqx9_qu1brp$l&6iq zph~IcC(BS>?&Jh{R*ggUWaV_%T6FB5iW4t}6!N`a!-&LujP4W8f;-Nbh%C?rZkvZ& zcf;XE24G90u^{Bg{ch*BB}WEY;>m3>$fr#zvD&iO5OvcY=Rw_BAf$DHc|-IBZ^8GL zJX?-AqTVq;LT_Mc`u=4y>DX|LKb$Zb*!-#d4VLj$1Mef`$x`m}01W(>cs&zU7PmUw zofQIAzfj4i-agR-m2whQ|69vv`j0ICuamt}!e}Hm1FX;mPT*&B{?Fd}=uvvWy}`@= z*PTsa$GEF%e5I^&gBB3PBm(fsd+ff8^x#k#$Bd~H$5>ea|5S?STGaEt3xm=(Q-eZ^eUQba zMLr&Tx(Gok)|HC3OMw{@tY@fUc|Se-AWz{)r!w}baMe$cDGVaEQ*2~JpW zemRFm4nOBH%X zf0g!3G*-Ck^wVG(>LjN}ka7p~$uwje-Oc?hD8XvI5&{oe63(UM`I#Q?v4g;QjH>jm zR~!UO4oVJ|>qsFC@pa?ez9x$Ya?d|1*V7|}RN)=p>c~!}q`v9NXNK%LU0;R7kLCor zAI&a-n$3{Qt&_IpArkHVFlff7+*&Aq`)ouavT}S68SJYD5YN&Ly2f|zsO4WqE5zT$ zPQJiQ74`w>YJYLM5bAu5<064(UYb5XLtgy7N8QmR8U42={~x7>VWt1eTHYc-GHQ(; zR_LB5kXBa!nL>g?WMWDzMtG92`JFJj#@!1F;?Qk5PG-uTms9iO>dzggsu8s-rU zMo$oZH$Rz>D=ZB3AElr)ZtShFVHEu$AhS)tzO=@4F5exV2u4->IIy=eeLJJ@2D5(T zUlsP7B!gdoXH1XmSay1I%H%&si8=IFI)0*aNHdX@KP;BKNUNo>m0fCTvK&w-Z#Ay?<>gmNQXhBKSbri@aZIX@om?(W}TP z4I<(ABIH7C01o!jTKvP> zrDO!_80khpurqh&*yzB+i=UHyIqa7_*rFrD^z!h5JeBpw&HvTjc?LDLwF@|)NC&Bk zA|g^n(gUFcq=O=%S1BP05IP~!J0ev;X#z?=bO8e@C?EpTMN|$|K|nx?6h+DbA%3ZJbgLOd%gTl40%}<3}C(NJmPMJP>B(r*J#XZX_ zXJEFtJ*cpgUD1wHR)mK2J`WbP|0? 
zB{7#9kxSb=SuJ)8Z~Jns;*VnCV}u_8C?pge9%`+ zPdJtYM0d$g_X$QBbyk|dPH9UDaJsj!V4e-ri<;aLYLlGsqHJ*yr`w1(E%c8Y@s76# zM2lVYLOHH*Y!TjQOZ16+)<{#GoDH2yI7^eq==ItTo@5FS z7ULxmN|;6x%u5Cp`)Ili?1$&74vPWochAak??#OaXRYB%Q7t(CH2=`$(=Q7zGe@{l zSCvK>0L~84$k@t=J_^SAnE+3)xHy>7|^la6P_*%8$|mK0u3Y-+xN&y^5( zwOqJ6>DXiLVL91F7F>IsPzdqMm$zca<*@;G2CY3HOV+s+SorGeJ6w(u*>i7kpNyZ4 zmU-=fT?57FFSWU+3$bS)VItg#X_p&dTdDe>jD%xjlY}EhA*TcsX!YiLEgm_}PWG1j zJ)GZNZMJ(+eM)%KlvV>@$@gAl%qdbqTJSz2vdo(;Bl2boD+`Hjg$QuVuK^*a8M)oa2Pc$k^Yd*#)w-`x zKLlQgcX4ojmhDG`CL*araugDi+uv2q0>wItwJaq8F?L_h^0+(C<_egno0*#(OXo=o z)GgY5xFIF9_(7naVdFI{vVT3|L&dh<9KgCe+M#Nm)x0|pAakFZ-)omOWBL?8P&L5u z@yb&l`w5?@BXoMsvDaWQ*tHtolX5pRe4NUIGn(=|e0GbYcEJm1 zvs;eWML%Z&5S?@yZ?dW1&BzDm9;RaV(RuxOY->7_@fi=r?&{948#rxH0wi)01{cfFOVp`!QK3aaICe8@;dySEUL=xhgsQ`YJ0hh=+1hiGoCm)t+He~#1`9iXq ztLV-}(lP5QGqD|-2;Dfvrg64OU6MasLs~DQn%_bLL}cVqEV%9^$`l=%o5!SkUleUX z7ae|H+4&q--WgqW-48{4eG+}54u?L|UsMy%VeXZ*2d$nV@yc*m1OwHjvPXJTd%b!m z-%A~3x_b4+6BCRaWc=pAZW%4eG@yXXgAW$W)7))y*Tkocs-I;(0&hbzNWBoa09lY; z;P>R+;#O}dp6GU4tfAv&O^pz5LyY%K9CGKV@u}mqWONN%Z?0My+F9PC z(cmD_d0w$rqQ<~-LVJ~JRJ=93RR!69kLqs$2&C3TnJCI>*D3J|HJt89Sk(mPYurev zP9$9+Ws>4oqqT$`6Hj!o;i!B>YOniW-@dtR6e@rb92a_$GAODm^a2PGT~T5O6${M@ z)*3<8$(KInzhmwoh}9+CPm1BxcL*~`?h`A<6q^#FtBb3%NMzDoQsS!NtR_!c?ETmb zCUaq{>B?@^Gzf#zi|iU5lO%~G-%g=UB$x8M)_xk&yYD8xzM7c#-Yip$<)dHVkeB|^wSNEbk}sW z4M!{db$!gj3nq$gWa||WjqvA_a~&!=4JGtZg@42XFL>pOybynnTIE3qA<&2%vpZ&w zhwC23`9}I?`j-B2nmP24@@b8A#6%a|OlihJl%V@GXsby+RH z49|$Q{J2|jsoHVcRXxfUa|>$ud z!fR2s8%Xm^`{GdG+6pa&K62KU^pYC0A0>Ptragi7ki)tb&2-NCOWZdlnfq5)li!)& zH_o6Z0r`@jGUfV!@99Thr+LZ@zygjMI zwG8itgQY5^BH1~)`Z!mG2c>HSHxwgXBEk8x22eP(8bShgBg)`Txa5_R&!y!N;#WMH zWlvYK<*+fcRh7OmTCza#5!g%*OINGeUF=UUb>!+7zJuZr;&~b?!*eZ;CB`NCOXDiG zA7Y2Cvnj#angGiJ%4VF@J$d}1v29{gS}ns*Q(CoKhn{|j%82%g1jT1fKXenX1#!r6 zIIvH%@36-U0tLf`?g>xc$V)`8;Jl;iGC7lz%9271!umq{uER5`>j^M_*Fa^Nqz2)7 z{c~J9TA%4Jhnt7z@vQL}^0>m3VQ5=TyA|>u|tEr7=tcb~g?kzR={TDy%xCdNSJd=|BT?r(~iu{PtbhtqW%_uq-w$ zOnf*t{GoEozL-SNo*m>HLw&%nlAFi_etBz_5>`f*Rq>VR8rS@$!d#EX(_bvM6bxF8 zM~jK&4tI=7Ms-nM!2#CG*^^IFj!mWKrmsiTL@f6E_T3*K3?!dyuxGHh)PIZ6o#bAfK|zds#8QK5EH>p&K@3xS(975YoH z;-1FZR`+Us=Z;O9wO#I-{}8|(;H+S>-P`XvG#1!Ar#5qOKEvWb7i|xV43lNel<)jU z=ZcYmrLsC0m=j9!LK3~-0k;<4)#v+xmh4_<1y#$WGKACS|r)V8JA34>Y zraSPWK|~SF7kjrgb2sVBtB95D!gqyJ93>pr5^WO;3ttzy$eZstZTrl9+MEr|`YotD zFlTGxkri*yY=$WU_T|PdaeQFH_+v#}o#om?zy6@Lm5g^%pFF}{r^fgK+Pnw0T;GHL36cFnjen$k{18t8%70FLQwqM~>U+U^jQ?NkJxX9N zpL>n~Sjqq_EdxWyAol)KF#UZC<+~BEFBJTf=x^lI9=-n$;{S$?qXhPub1VjLOQFK? 
z5)e2RF9C-kD9pL7jg15XZ;M4rL#4n-4B}hs{)U$SSsxCAV5AXH3a)NL!OGzf8z~74 z&Q?+ahl5K>La-QHJQ%WH|GNl7(T4s~K=#P|{bI`TJE8y0AJU{1_J;zdoKsBDmOjTyiG4;W0!H*Gm_D_`io- zBJ9~VR(NL{JS9OmD+)B`3iR-_^Zm-cQ*@mD4)OKuTND1Sr+h8@K|$GjYWC%v{~vPx z+td5A5mPE&3|jEn9CuR5O645N(rV4R)c}z z$_Paz6_^@A1*!^%LZJ|8c{zj_)N9$fDi zYhpzl4>-t&+9q}{^t3B^ZhEi?&kLLeY>BemlvO+@B<&c$X)lpt?K6|HhNbR4ywj&- z2oYlyCpk!Xpfe>AfgWz7`m6V)3G~KuNGN~wdUF@ULU~Teu%%6 tkd(zd<*^EE1lazD5&cP*q4=Rhk~a}=%gi7NK|&GC3_?PxXf +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +struct app_params app; + +static const char usage[] = +" \n" +" load_balancer -- \n" +" \n" +"Application manadatory parameters: \n" +" --rx \"(PORT, QUEUE, LCORE), ...\" : List of NIC RX ports and queues \n" +" handled by the I/O RX lcores \n" +" --tx \"(PORT, LCORE), ...\" : List of NIC TX ports handled by the I/O TX \n" +" lcores \n" +" --w \"LCORE, ...\" : List of the worker lcores \n" +" --lpm \"IP / PREFIX => PORT; ...\" : List of LPM rules used by the worker \n" +" lcores for packet forwarding \n" +" \n" +"Application optional parameters: \n" +" --rsz \"A, B, C, D\" : Ring sizes \n" +" A = Size (in number of buffer descriptors) of each of the NIC RX \n" +" rings read by the I/O RX lcores (default value is %u) \n" +" B = Size (in number of elements) of each of the SW rings used by the\n" +" I/O RX lcores to send packets to worker lcores (default value is\n" +" %u) \n" +" C = Size (in number of elements) of each of the SW rings used by the\n" +" worker lcores to send packets to I/O TX lcores (default value is\n" +" %u) \n" +" D = Size (in number of buffer descriptors) of each of the NIC TX \n" +" rings written by I/O TX lcores (default value is %u) \n" +" --bsz \"(A, B), (C, D), (E, F)\" : Burst sizes \n" +" A = I/O RX lcore read burst size from NIC RX (default value is %u) \n" +" B = I/O RX lcore write burst size to output SW rings (default value \n" +" is %u) \n" +" C = Worker lcore read burst size from input SW rings (default value \n" +" is %u) \n" +" D = Worker lcore write burst size to output SW rings (default value \n" +" is %u) \n" +" E = I/O TX lcore read burst size from input SW rings (default value \n" +" is %u) \n" +" F = I/O TX lcore write burst size to NIC TX (default value is %u) \n" +" --pos-lb POS : Position of the 1-byte field within the input packet used by\n" +" the I/O RX lcores to identify the worker lcore for the current \n" +" packet (default value is %u) \n"; + +void +app_print_usage(void) +{ + printf(usage, + APP_DEFAULT_NIC_RX_RING_SIZE, + APP_DEFAULT_RING_RX_SIZE, + APP_DEFAULT_RING_TX_SIZE, + APP_DEFAULT_NIC_TX_RING_SIZE, + APP_DEFAULT_BURST_SIZE_IO_RX_READ, + APP_DEFAULT_BURST_SIZE_IO_RX_WRITE, + APP_DEFAULT_BURST_SIZE_WORKER_READ, + APP_DEFAULT_BURST_SIZE_WORKER_WRITE, + APP_DEFAULT_BURST_SIZE_IO_TX_READ, + APP_DEFAULT_BURST_SIZE_IO_TX_WRITE, + APP_DEFAULT_IO_RX_LB_POS + ); +} + +#ifndef APP_ARG_RX_MAX_CHARS +#define APP_ARG_RX_MAX_CHARS 4096 +#endif + +#ifndef APP_ARG_RX_MAX_TUPLES +#define APP_ARG_RX_MAX_TUPLES 128 +#endif + +static int +str_to_unsigned_array( + const char *s, size_t sbuflen, + char separator, + unsigned num_vals, + unsigned *vals) +{ + char str[sbuflen+1]; + char *splits[num_vals]; + char 
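+	/*
+	 * Illustrative command line matching the usage text above. The core
+	 * mask, port numbers, LPM rules and EAL options are made-up values
+	 * for illustration only, not taken from this patch or its guides:
+	 *
+	 *   ./build/load_balancer -c 0xff -n 4 -- \
+	 *       --rx "(0,0,0),(1,0,1)" --tx "(0,2),(1,2)" --w "3,4,5,6" \
+	 *       --lpm "1.0.0.0/24=>0; 1.0.1.0/24=>1;" --pos-lb 29
+	 *
+	 * Lcores 0 and 1 handle NIC RX, lcore 2 handles NIC TX for both
+	 * ports, and lcores 3-6 are the four workers (a power of two, as
+	 * parse_arg_w() below requires).
+	 */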
*endptr = NULL; + int i, num_splits = 0; + + /* copy s so we don't modify original string */ + rte_snprintf(str, sizeof(str), "%s", s); + num_splits = rte_strsplit(str, sizeof(str), splits, num_vals, separator); + + errno = 0; + for (i = 0; i < num_splits; i++) { + vals[i] = strtoul(splits[i], &endptr, 0); + if (errno != 0 || *endptr != '\0') + return -1; + } + + return num_splits; +} + +static int +str_to_unsigned_vals( + const char *s, + size_t sbuflen, + char separator, + unsigned num_vals, ...) +{ + unsigned i, vals[num_vals]; + va_list ap; + + num_vals = str_to_unsigned_array(s, sbuflen, separator, num_vals, vals); + + va_start(ap, num_vals); + for (i = 0; i < num_vals; i++) { + unsigned *u = va_arg(ap, unsigned *); + *u = vals[i]; + } + va_end(ap); + return num_vals; +} + +static int +parse_arg_rx(const char *arg) +{ + const char *p0 = arg, *p = arg; + uint32_t n_tuples; + + if (strnlen(arg, APP_ARG_RX_MAX_CHARS + 1) == APP_ARG_RX_MAX_CHARS + 1) { + return -1; + } + + n_tuples = 0; + while ((p = strchr(p0,'(')) != NULL) { + struct app_lcore_params *lp; + uint32_t port, queue, lcore, i; + + p0 = strchr(p++, ')'); + if ((p0 == NULL) || + (str_to_unsigned_vals(p, p0 - p, ',', 3, &port, &queue, &lcore) != 3)) { + return -2; + } + + /* Enable port and queue for later initialization */ + if ((port >= APP_MAX_NIC_PORTS) || (queue >= APP_MAX_RX_QUEUES_PER_NIC_PORT)) { + return -3; + } + if (app.nic_rx_queue_mask[port][queue] != 0) { + return -4; + } + app.nic_rx_queue_mask[port][queue] = 1; + + /* Check and assign (port, queue) to I/O lcore */ + if (rte_lcore_is_enabled(lcore) == 0) { + return -5; + } + + if (lcore >= APP_MAX_LCORES) { + return -6; + } + lp = &app.lcore_params[lcore]; + if (lp->type == e_APP_LCORE_WORKER) { + return -7; + } + lp->type = e_APP_LCORE_IO; + for (i = 0; i < lp->io.rx.n_nic_queues; i ++) { + if ((lp->io.rx.nic_queues[i].port == port) && + (lp->io.rx.nic_queues[i].queue == queue)) { + return -8; + } + } + if (lp->io.rx.n_nic_queues >= APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE) { + return -9; + } + lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].port = (uint8_t) port; + lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].queue = (uint8_t) queue; + lp->io.rx.n_nic_queues ++; + + n_tuples ++; + if (n_tuples > APP_ARG_RX_MAX_TUPLES) { + return -10; + } + } + + if (n_tuples == 0) { + return -11; + } + + return 0; +} + +#ifndef APP_ARG_TX_MAX_CHARS +#define APP_ARG_TX_MAX_CHARS 4096 +#endif + +#ifndef APP_ARG_TX_MAX_TUPLES +#define APP_ARG_TX_MAX_TUPLES 128 +#endif + +static int +parse_arg_tx(const char *arg) +{ + const char *p0 = arg, *p = arg; + uint32_t n_tuples; + + if (strnlen(arg, APP_ARG_TX_MAX_CHARS + 1) == APP_ARG_TX_MAX_CHARS + 1) { + return -1; + } + + n_tuples = 0; + while ((p = strchr(p0,'(')) != NULL) { + struct app_lcore_params *lp; + uint32_t port, lcore, i; + + p0 = strchr(p++, ')'); + if ((p0 == NULL) || + (str_to_unsigned_vals(p, p0 - p, ',', 2, &port, &lcore) != 2)) { + return -2; + } + + /* Enable port and queue for later initialization */ + if (port >= APP_MAX_NIC_PORTS) { + return -3; + } + if (app.nic_tx_port_mask[port] != 0) { + return -4; + } + app.nic_tx_port_mask[port] = 1; + + /* Check and assign (port, queue) to I/O lcore */ + if (rte_lcore_is_enabled(lcore) == 0) { + return -5; + } + + if (lcore >= APP_MAX_LCORES) { + return -6; + } + lp = &app.lcore_params[lcore]; + if (lp->type == e_APP_LCORE_WORKER) { + return -7; + } + lp->type = e_APP_LCORE_IO; + for (i = 0; i < lp->io.tx.n_nic_ports; i ++) { + if (lp->io.tx.nic_ports[i] == port) { + return -8; + } + 
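+	/*
+	 * Note on the negative return values used by parse_arg_rx() and
+	 * parse_arg_tx(): every distinct validation failure (oversized
+	 * argument string, malformed tuple, port or queue out of range,
+	 * duplicate entry, disabled lcore, lcore already claimed as a worker,
+	 * too many queues/ports per I/O lcore, too many tuples) returns its
+	 * own code, so the "Incorrect value for --rx/--tx argument (%d)"
+	 * message printed by app_parse_args() identifies which check failed.
+	 */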
} + if (lp->io.tx.n_nic_ports >= APP_MAX_NIC_TX_PORTS_PER_IO_LCORE) { + return -9; + } + lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = (uint8_t) port; + lp->io.tx.n_nic_ports ++; + + n_tuples ++; + if (n_tuples > APP_ARG_TX_MAX_TUPLES) { + return -10; + } + } + + if (n_tuples == 0) { + return -11; + } + + return 0; +} + +#ifndef APP_ARG_W_MAX_CHARS +#define APP_ARG_W_MAX_CHARS 4096 +#endif + +#ifndef APP_ARG_W_MAX_TUPLES +#define APP_ARG_W_MAX_TUPLES APP_MAX_WORKER_LCORES +#endif + +static int +parse_arg_w(const char *arg) +{ + const char *p = arg; + uint32_t n_tuples; + + if (strnlen(arg, APP_ARG_W_MAX_CHARS + 1) == APP_ARG_W_MAX_CHARS + 1) { + return -1; + } + + n_tuples = 0; + while (*p != 0) { + struct app_lcore_params *lp; + uint32_t lcore; + + errno = 0; + lcore = strtoul(p, NULL, 0); + if ((errno != 0)) { + return -2; + } + + /* Check and enable worker lcore */ + if (rte_lcore_is_enabled(lcore) == 0) { + return -3; + } + + if (lcore >= APP_MAX_LCORES) { + return -4; + } + lp = &app.lcore_params[lcore]; + if (lp->type == e_APP_LCORE_IO) { + return -5; + } + lp->type = e_APP_LCORE_WORKER; + + n_tuples ++; + if (n_tuples > APP_ARG_W_MAX_TUPLES) { + return -6; + } + + p = strchr(p, ','); + if (p == NULL) { + break; + } + p ++; + } + + if (n_tuples == 0) { + return -7; + } + + if ((n_tuples & (n_tuples - 1)) != 0) { + return -8; + } + + return 0; +} + +#ifndef APP_ARG_LPM_MAX_CHARS +#define APP_ARG_LPM_MAX_CHARS 4096 +#endif + +static int +parse_arg_lpm(const char *arg) +{ + const char *p = arg, *p0; + + if (strnlen(arg, APP_ARG_LPM_MAX_CHARS + 1) == APP_ARG_TX_MAX_CHARS + 1) { + return -1; + } + + while (*p != 0) { + uint32_t ip_a, ip_b, ip_c, ip_d, ip, depth, if_out; + char *endptr; + + p0 = strchr(p, '/'); + if ((p0 == NULL) || + (str_to_unsigned_vals(p, p0 - p, '.', 4, &ip_a, &ip_b, &ip_c, &ip_d) != 4)) { + return -2; + } + + p = p0 + 1; + errno = 0; + depth = strtoul(p, &endptr, 0); + if (errno != 0 || *endptr != '=') { + return -3; + } + p = strchr(p, '>'); + if (p == NULL) { + return -4; + } + if_out = strtoul(++p, &endptr, 0); + if (errno != 0 || (*endptr != '\0' && *endptr != ';')) { + return -5; + } + + if ((ip_a >= 256) || (ip_b >= 256) || (ip_c >= 256) || (ip_d >= 256) || + (depth == 0) || (depth >= 32) || + (if_out >= APP_MAX_NIC_PORTS)) { + return -6; + } + ip = (ip_a << 24) | (ip_b << 16) | (ip_c << 8) | ip_d; + + if (app.n_lpm_rules >= APP_MAX_LPM_RULES) { + return -7; + } + app.lpm_rules[app.n_lpm_rules].ip = ip; + app.lpm_rules[app.n_lpm_rules].depth = (uint8_t) depth; + app.lpm_rules[app.n_lpm_rules].if_out = (uint8_t) if_out; + app.n_lpm_rules ++; + + p = strchr(p, ';'); + if (p == NULL) { + return -8; + } + p ++; + } + + if (app.n_lpm_rules == 0) { + return -9; + } + + return 0; +} + +static int +app_check_lpm_table(void) +{ + uint32_t rule; + + /* For each rule, check that the output I/F is enabled */ + for (rule = 0; rule < app.n_lpm_rules; rule ++) + { + uint32_t port = app.lpm_rules[rule].if_out; + + if (app.nic_tx_port_mask[port] == 0) { + return -1; + } + } + + return 0; +} + +static int +app_check_every_rx_port_is_tx_enabled(void) +{ + uint8_t port; + + for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { + if ((app_get_nic_rx_queues_per_port(port) > 0) && (app.nic_tx_port_mask[port] == 0)) { + return -1; + } + } + + return 0; +} + +#ifndef APP_ARG_RSZ_CHARS +#define APP_ARG_RSZ_CHARS 63 +#endif + +static int +parse_arg_rsz(const char *arg) +{ + if (strnlen(arg, APP_ARG_RSZ_CHARS + 1) == APP_ARG_RSZ_CHARS + 1) { + return -1; + } + + if (str_to_unsigned_vals(arg, 
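+	/*
+	 * parse_arg_w() above additionally rejects any worker count that is
+	 * not a power of two ((n_tuples & (n_tuples - 1)) != 0). The likely
+	 * reason is that the I/O RX lcores select a worker from the pos-lb
+	 * byte with a simple bit mask (byte & (n_workers - 1)) rather than a
+	 * modulo, which only distributes evenly for power-of-two counts.
+	 */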
APP_ARG_RSZ_CHARS, ',', 4, + &app.nic_rx_ring_size, + &app.ring_rx_size, + &app.ring_tx_size, + &app.nic_tx_ring_size) != 4) + return -2; + + + if ((app.nic_rx_ring_size == 0) || + (app.nic_tx_ring_size == 0) || + (app.ring_rx_size == 0) || + (app.ring_tx_size == 0)) { + return -3; + } + + return 0; +} + +#ifndef APP_ARG_BSZ_CHARS +#define APP_ARG_BSZ_CHARS 63 +#endif + +static int +parse_arg_bsz(const char *arg) +{ + const char *p = arg, *p0; + if (strnlen(arg, APP_ARG_BSZ_CHARS + 1) == APP_ARG_BSZ_CHARS + 1) { + return -1; + } + + p0 = strchr(p++, ')'); + if ((p0 == NULL) || + (str_to_unsigned_vals(p, p0 - p, ',', 2, &app.burst_size_io_rx_read, &app.burst_size_io_rx_write) != 2)) { + return -2; + } + + p = strchr(p0, '('); + if (p == NULL) { + return -3; + } + + p0 = strchr(p++, ')'); + if ((p0 == NULL) || + (str_to_unsigned_vals(p, p0 - p, ',', 2, &app.burst_size_worker_read, &app.burst_size_worker_write) != 2)) { + return -4; + } + + p = strchr(p0, '('); + if (p == NULL) { + return -5; + } + + p0 = strchr(p++, ')'); + if ((p0 == NULL) || + (str_to_unsigned_vals(p, p0 - p, ',', 2, &app.burst_size_io_tx_read, &app.burst_size_io_tx_write) != 2)) { + return -6; + } + + if ((app.burst_size_io_rx_read == 0) || + (app.burst_size_io_rx_write == 0) || + (app.burst_size_worker_read == 0) || + (app.burst_size_worker_write == 0) || + (app.burst_size_io_tx_read == 0) || + (app.burst_size_io_tx_write == 0)) { + return -7; + } + + if ((app.burst_size_io_rx_read > APP_MBUF_ARRAY_SIZE) || + (app.burst_size_io_rx_write > APP_MBUF_ARRAY_SIZE) || + (app.burst_size_worker_read > APP_MBUF_ARRAY_SIZE) || + (app.burst_size_worker_write > APP_MBUF_ARRAY_SIZE) || + ((2 * app.burst_size_io_tx_read) > APP_MBUF_ARRAY_SIZE) || + (app.burst_size_io_tx_write > APP_MBUF_ARRAY_SIZE)) { + return -8; + } + + return 0; +} + +#ifndef APP_ARG_NUMERICAL_SIZE_CHARS +#define APP_ARG_NUMERICAL_SIZE_CHARS 15 +#endif + +static int +parse_arg_pos_lb(const char *arg) +{ + uint32_t x; + char *endpt; + + if (strnlen(arg, APP_ARG_NUMERICAL_SIZE_CHARS + 1) == APP_ARG_NUMERICAL_SIZE_CHARS + 1) { + return -1; + } + + errno = 0; + x = strtoul(arg, &endpt, 10); + if (errno != 0 || endpt == arg || *endpt != '\0'){ + return -2; + } + + if (x >= 64) { + return -3; + } + + app.pos_lb = (uint8_t) x; + + return 0; +} + +/* Parse the argument given in the command line of the application */ +int +app_parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + char *prgname = argv[0]; + static struct option lgopts[] = { + {"rx", 1, 0, 0}, + {"tx", 1, 0, 0}, + {"w", 1, 0, 0}, + {"lpm", 1, 0, 0}, + {"rsz", 1, 0, 0}, + {"bsz", 1, 0, 0}, + {"pos-lb", 1, 0, 0}, + {NULL, 0, 0, 0} + }; + uint32_t arg_w = 0; + uint32_t arg_rx = 0; + uint32_t arg_tx = 0; + uint32_t arg_lpm = 0; + uint32_t arg_rsz = 0; + uint32_t arg_bsz = 0; + uint32_t arg_pos_lb = 0; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* long options */ + case 0: + if (!strcmp(lgopts[option_index].name, "rx")) { + arg_rx = 1; + ret = parse_arg_rx(optarg); + if (ret) { + printf("Incorrect value for --rx argument (%d)\n", ret); + return -1; + } + } + if (!strcmp(lgopts[option_index].name, "tx")) { + arg_tx = 1; + ret = parse_arg_tx(optarg); + if (ret) { + printf("Incorrect value for --tx argument (%d)\n", ret); + return -1; + } + } + if (!strcmp(lgopts[option_index].name, "w")) { + arg_w = 1; + ret = parse_arg_w(optarg); + if (ret) { + printf("Incorrect value for --w argument (%d)\n", 
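+	/*
+	 * --pos-lb is a byte offset into the received frame, restricted to
+	 * 0..63 by parse_arg_pos_lb() above; the I/O RX lcores read that one
+	 * byte to pick a worker. As a worked example: for an IPv4 packet on
+	 * Ethernet with no VLAN tag, offset 29 (14-byte Ethernet header plus
+	 * IP header bytes 12..15) is the least significant byte of the source
+	 * IP address, so traffic from different hosts spreads across workers.
+	 */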
ret); + return -1; + } + } + if (!strcmp(lgopts[option_index].name, "lpm")) { + arg_lpm = 1; + ret = parse_arg_lpm(optarg); + if (ret) { + printf("Incorrect value for --lpm argument (%d)\n", ret); + return -1; + } + } + if (!strcmp(lgopts[option_index].name, "rsz")) { + arg_rsz = 1; + ret = parse_arg_rsz(optarg); + if (ret) { + printf("Incorrect value for --rsz argument (%d)\n", ret); + return -1; + } + } + if (!strcmp(lgopts[option_index].name, "bsz")) { + arg_bsz = 1; + ret = parse_arg_bsz(optarg); + if (ret) { + printf("Incorrect value for --bsz argument (%d)\n", ret); + return -1; + } + } + if (!strcmp(lgopts[option_index].name, "pos-lb")) { + arg_pos_lb = 1; + ret = parse_arg_pos_lb(optarg); + if (ret) { + printf("Incorrect value for --pos-lb argument (%d)\n", ret); + return -1; + } + } + break; + + default: + return -1; + } + } + + /* Check that all mandatory arguments are provided */ + if ((arg_rx == 0) || (arg_tx == 0) || (arg_w == 0) || (arg_lpm == 0)){ + printf("Not all mandatory arguments are present\n"); + return -1; + } + + /* Assign default values for the optional arguments not provided */ + if (arg_rsz == 0) { + app.nic_rx_ring_size = APP_DEFAULT_NIC_RX_RING_SIZE; + app.nic_tx_ring_size = APP_DEFAULT_NIC_TX_RING_SIZE; + app.ring_rx_size = APP_DEFAULT_RING_RX_SIZE; + app.ring_tx_size = APP_DEFAULT_RING_TX_SIZE; + } + + if (arg_bsz == 0) { + app.burst_size_io_rx_read = APP_DEFAULT_BURST_SIZE_IO_RX_READ; + app.burst_size_io_rx_write = APP_DEFAULT_BURST_SIZE_IO_RX_WRITE; + app.burst_size_io_tx_read = APP_DEFAULT_BURST_SIZE_IO_TX_READ; + app.burst_size_io_tx_write = APP_DEFAULT_BURST_SIZE_IO_TX_WRITE; + app.burst_size_worker_read = APP_DEFAULT_BURST_SIZE_WORKER_READ; + app.burst_size_worker_write = APP_DEFAULT_BURST_SIZE_WORKER_WRITE; + } + + if (arg_pos_lb == 0) { + app.pos_lb = APP_DEFAULT_IO_RX_LB_POS; + } + + /* Check cross-consistency of arguments */ + if ((ret = app_check_lpm_table()) < 0) { + printf("At least one LPM rule is inconsistent (%d)\n", ret); + return -1; + } + if (app_check_every_rx_port_is_tx_enabled() < 0) { + printf("On LPM lookup miss, packet is sent back on the input port.\n"); + printf("At least one RX port is not enabled for TX.\n"); + return -2; + } + + if (optind >= 0) + argv[optind - 1] = prgname; + + ret = optind - 1; + optind = 0; /* reset getopt lib */ + return ret; +} + +int +app_get_nic_rx_queues_per_port(uint8_t port) +{ + uint32_t i, count; + + if (port >= APP_MAX_NIC_PORTS) { + return -1; + } + + count = 0; + for (i = 0; i < APP_MAX_RX_QUEUES_PER_NIC_PORT; i ++) { + if (app.nic_rx_queue_mask[port][i] == 1) { + count ++; + } + } + + return count; +} + +int +app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out) +{ + uint32_t lcore; + + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_io *lp = &app.lcore_params[lcore].io; + uint32_t i; + + if (app.lcore_params[lcore].type != e_APP_LCORE_IO) { + continue; + } + + for (i = 0; i < lp->rx.n_nic_queues; i ++) { + if ((lp->rx.nic_queues[i].port == port) && + (lp->rx.nic_queues[i].queue == queue)) { + *lcore_out = lcore; + return 0; + } + } + } + + return -1; +} + +int +app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out) +{ + uint32_t lcore; + + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_io *lp = &app.lcore_params[lcore].io; + uint32_t i; + + if (app.lcore_params[lcore].type != e_APP_LCORE_IO) { + continue; + } + + for (i = 0; i < lp->tx.n_nic_ports; i ++) { + if (lp->tx.nic_ports[i] == port) { + *lcore_out = 
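+	/*
+	 * The tail of app_parse_args() above follows the usual DPDK example
+	 * convention: argv[optind - 1] is overwritten with the program name
+	 * so the remaining argv still starts with a program-name slot, the
+	 * number of consumed arguments is returned to the caller, and optind
+	 * is reset to 0 so any later getopt pass starts from a clean state.
+	 */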
lcore; + return 0; + } + } + } + + return -1; +} + +int +app_is_socket_used(uint32_t socket) +{ + uint32_t lcore; + + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) { + continue; + } + + if (socket == rte_lcore_to_socket_id(lcore)) { + return 1; + } + } + + return 0; +} + +uint32_t +app_get_lcores_io_rx(void) +{ + uint32_t lcore, count; + + count = 0; + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io; + + if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) || + (lp_io->rx.n_nic_queues == 0)) { + continue; + } + + count ++; + } + + return count; +} + +uint32_t +app_get_lcores_worker(void) +{ + uint32_t lcore, count; + + count = 0; + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) { + continue; + } + + count ++; + } + + if (count > APP_MAX_WORKER_LCORES) { + rte_panic("Algorithmic error (too many worker lcores)\n"); + return 0; + } + + return count; +} + +void +app_print_params(void) +{ + uint32_t port, queue, lcore, rule, i, j; + + /* Print NIC RX configuration */ + printf("NIC RX ports: "); + for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { + uint32_t n_rx_queues = app_get_nic_rx_queues_per_port((uint8_t) port); + + if (n_rx_queues == 0) { + continue; + } + + printf("%u (", port); + for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) { + if (app.nic_rx_queue_mask[port][queue] == 1) { + printf("%u ", queue); + } + } + printf(") "); + } + printf(";\n"); + + /* Print I/O lcore RX params */ + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_io *lp = &app.lcore_params[lcore].io; + + if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) || + (lp->rx.n_nic_queues == 0)) { + continue; + } + + printf("I/O lcore %u (socket %u): ", lcore, rte_lcore_to_socket_id(lcore)); + + printf("RX ports "); + for (i = 0; i < lp->rx.n_nic_queues; i ++) { + printf("(%u, %u) ", + (uint32_t) lp->rx.nic_queues[i].port, + (uint32_t) lp->rx.nic_queues[i].queue); + } + printf("; "); + + printf("Output rings "); + for (i = 0; i < lp->rx.n_rings; i ++) { + printf("%p ", lp->rx.rings[i]); + } + printf(";\n"); + } + + /* Print worker lcore RX params */ + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker; + + if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) { + continue; + } + + printf("Worker lcore %u (socket %u) ID %u: ", + lcore, + rte_lcore_to_socket_id(lcore), + lp->worker_id); + + printf("Input rings "); + for (i = 0; i < lp->n_rings_in; i ++) { + printf("%p ", lp->rings_in[i]); + } + + printf(";\n"); + } + + printf("\n"); + + /* Print NIC TX configuration */ + printf("NIC TX ports: "); + for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { + if (app.nic_tx_port_mask[port] == 1) { + printf("%u ", port); + } + } + printf(";\n"); + + /* Print I/O TX lcore params */ + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_io *lp = &app.lcore_params[lcore].io; + uint32_t n_workers = app_get_lcores_worker(); + + if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) || + (lp->tx.n_nic_ports == 0)) { + continue; + } + + printf("I/O lcore %u (socket %u): ", lcore, rte_lcore_to_socket_id(lcore)); + + printf("Input rings per TX port "); + for (i = 0; i < lp->tx.n_nic_ports; i ++) { + port = lp->tx.nic_ports[i]; + + printf("%u (", port); + for (j = 0; j < n_workers; j ++) { + printf("%p ", 
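+	/*
+	 * Illustrative fragment of what app_print_params() prints for a
+	 * two-port, four-worker setup (socket IDs and ring addresses are
+	 * placeholders, abbreviated here):
+	 *
+	 *   NIC RX ports: 0 (0 ) 1 (0 ) ;
+	 *   I/O lcore 0 (socket 0): RX ports (0, 0) ; Output rings 0x7f... 0x7f... ;
+	 *   Worker lcore 3 (socket 0) ID 0: Input rings 0x7f... 0x7f... ;
+	 */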
lp->tx.rings[port][j]); + } + printf(") "); + + } + + printf(";\n"); + } + + /* Print worker lcore TX params */ + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker; + + if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) { + continue; + } + + printf("Worker lcore %u (socket %u) ID %u: \n", + lcore, + rte_lcore_to_socket_id(lcore), + lp->worker_id); + + printf("Output rings per TX port "); + for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { + if (lp->rings_out[port] != NULL) { + printf("%u (%p) ", port, lp->rings_out[port]); + } + } + + printf(";\n"); + } + + /* Print LPM rules */ + printf("LPM rules: \n"); + for (rule = 0; rule < app.n_lpm_rules; rule ++) { + uint32_t ip = app.lpm_rules[rule].ip; + uint8_t depth = app.lpm_rules[rule].depth; + uint8_t if_out = app.lpm_rules[rule].if_out; + + printf("\t%u: %u.%u.%u.%u/%u => %u;\n", + rule, + (ip & 0xFF000000) >> 24, + (ip & 0x00FF0000) >> 16, + (ip & 0x0000FF00) >> 8, + ip & 0x000000FF, + (uint32_t) depth, + (uint32_t) if_out + ); + } + + /* Rings */ + printf("Ring sizes: NIC RX = %u; Worker in = %u; Worker out = %u; NIC TX = %u;\n", + app.nic_rx_ring_size, + app.ring_rx_size, + app.ring_tx_size, + app.nic_tx_ring_size); + + /* Bursts */ + printf("Burst sizes: I/O RX (rd = %u, wr = %u); Worker (rd = %u, wr = %u); I/O TX (rd = %u, wr = %u)\n", + app.burst_size_io_rx_read, + app.burst_size_io_rx_write, + app.burst_size_worker_read, + app.burst_size_worker_write, + app.burst_size_io_tx_read, + app.burst_size_io_tx_write); +} diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c new file mode 100644 index 0000000000..12e88870a7 --- /dev/null +++ b/examples/load_balancer/init.c @@ -0,0 +1,507 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +static struct rte_eth_conf port_conf = { + .rxmode = { + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 1, /**< IP checksum offload enabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 0, /**< CRC stripped by hardware */ + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IPV4, + }, + }, + .txmode = { + }, +}; + +static struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = APP_DEFAULT_NIC_RX_PTHRESH, + .hthresh = APP_DEFAULT_NIC_RX_HTHRESH, + .wthresh = APP_DEFAULT_NIC_RX_WTHRESH, + }, + .rx_free_thresh = APP_DEFAULT_NIC_RX_FREE_THRESH, +}; + +static struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = APP_DEFAULT_NIC_TX_PTHRESH, + .hthresh = APP_DEFAULT_NIC_TX_HTHRESH, + .wthresh = APP_DEFAULT_NIC_TX_WTHRESH, + }, + .tx_free_thresh = APP_DEFAULT_NIC_TX_FREE_THRESH, + .tx_rs_thresh = APP_DEFAULT_NIC_TX_RS_THRESH, +}; + +static void +app_assign_worker_ids(void) +{ + uint32_t lcore, worker_id; + + /* Assign ID for each worker */ + worker_id = 0; + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker; + + if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) { + continue; + } + + lp_worker->worker_id = worker_id; + worker_id ++; + } +} + +static void +app_init_mbuf_pools(void) +{ + uint32_t socket, lcore; + + /* Init the buffer pools */ + for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) { + char name[32]; + if (app_is_socket_used(socket) == 0) { + continue; + } + + rte_snprintf(name, sizeof(name), "mbuf_pool_%u", socket); + printf("Creating the mbuf pool for socket %u ...\n", socket); + app.pools[socket] = rte_mempool_create( + name, + APP_DEFAULT_MEMPOOL_BUFFERS, + APP_DEFAULT_MBUF_SIZE, + APP_DEFAULT_MEMPOOL_CACHE_SIZE, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + socket, + 0); + if (app.pools[socket] == NULL) { + rte_panic("Cannot create mbuf pool on socket %u\n", socket); + } + } + + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) { + continue; + } + + socket = rte_lcore_to_socket_id(lcore); + app.lcore_params[lcore].pool = app.pools[socket]; + } +} + +static void +app_init_lpm_tables(void) +{ + uint32_t socket, lcore; + + /* Init the LPM tables */ + for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) { + char name[32]; + uint32_t rule; + + if (app_is_socket_used(socket) == 0) { + continue; + } + + rte_snprintf(name, sizeof(name), "lpm_table_%u", socket); + printf("Creating the LPM table for socket %u ...\n", socket); + app.lpm_tables[socket] = rte_lpm_create( + name, + socket, + APP_MAX_LPM_RULES, + RTE_LPM_MEMZONE); + if (app.lpm_tables[socket] == NULL) { + rte_panic("Unable to create LPM table on socket %u\n", socket); + } + + for (rule = 0; rule < app.n_lpm_rules; rule ++) { + int ret; + + ret = rte_lpm_add(app.lpm_tables[socket], + app.lpm_rules[rule].ip, + 
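+	/*
+	 * Per-socket resource pattern used throughout init.c: one mbuf pool
+	 * and one LPM table are created on every CPU socket that hosts at
+	 * least one enabled lcore (app_is_socket_used()), and each lcore is
+	 * then pointed at the copies local to its own socket, so buffer
+	 * allocation and route lookups stay in socket-local memory.
+	 */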
app.lpm_rules[rule].depth, + app.lpm_rules[rule].if_out); + + if (ret < 0) { + rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n", + rule, app.lpm_rules[rule].ip, + (uint32_t) app.lpm_rules[rule].depth, + (uint32_t) app.lpm_rules[rule].if_out, + socket, + ret); + } + } + + } + + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) { + continue; + } + + socket = rte_lcore_to_socket_id(lcore); + app.lcore_params[lcore].worker.lpm_table = app.lpm_tables[socket]; + } +} + +static void +app_init_rings_rx(void) +{ + uint32_t lcore; + + /* Initialize the rings for the RX side */ + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io; + uint32_t socket_io, lcore_worker; + + if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) || + (lp_io->rx.n_nic_queues == 0)) { + continue; + } + + socket_io = rte_lcore_to_socket_id(lcore); + + for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker ++) { + char name[32]; + struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore_worker].worker; + struct rte_ring *ring = NULL; + + if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER) { + continue; + } + + printf("Creating ring to connect I/O lcore %u (socket %u) with worker lcore %u ...\n", + lcore, + socket_io, + lcore_worker); + rte_snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u", + socket_io, + lcore, + lcore_worker); + ring = rte_ring_create( + name, + app.ring_rx_size, + socket_io, + RING_F_SP_ENQ | RING_F_SC_DEQ); + if (ring == NULL) { + rte_panic("Cannot create ring to connect I/O core %u with worker core %u\n", + lcore, + lcore_worker); + } + + lp_io->rx.rings[lp_io->rx.n_rings] = ring; + lp_io->rx.n_rings ++; + + lp_worker->rings_in[lp_worker->n_rings_in] = ring; + lp_worker->n_rings_in ++; + } + } + + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io; + + if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) || + (lp_io->rx.n_nic_queues == 0)) { + continue; + } + + if (lp_io->rx.n_rings != app_get_lcores_worker()) { + rte_panic("Algorithmic error (I/O RX rings)\n"); + } + } + + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker; + + if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) { + continue; + } + + if (lp_worker->n_rings_in != app_get_lcores_io_rx()) { + rte_panic("Algorithmic error (worker input rings)\n"); + } + } +} + +static void +app_init_rings_tx(void) +{ + uint32_t lcore; + + /* Initialize the rings for the TX side */ + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker; + uint32_t port; + + if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) { + continue; + } + + for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { + char name[32]; + struct app_lcore_params_io *lp_io = NULL; + struct rte_ring *ring; + uint32_t socket_io, lcore_io; + + if (app.nic_tx_port_mask[port] == 0) { + continue; + } + + if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) { + rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n", + port); + } + + lp_io = &app.lcore_params[lcore_io].io; + socket_io = rte_lcore_to_socket_id(lcore_io); + + printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n", + lcore, port, lcore_io, 
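+	/*
+	 * Each software ring created in init.c links exactly one producer
+	 * lcore to exactly one consumer lcore (I/O RX lcore -> worker above,
+	 * worker -> I/O TX lcore here), which is why RING_F_SP_ENQ |
+	 * RING_F_SC_DEQ is safe: the single-producer/single-consumer paths
+	 * avoid the heavier multi-producer/multi-consumer synchronization.
+	 */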
socket_io); + rte_snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port); + ring = rte_ring_create( + name, + app.ring_tx_size, + socket_io, + RING_F_SP_ENQ | RING_F_SC_DEQ); + if (ring == NULL) { + rte_panic("Cannot create ring to connect worker core %u with TX port %u\n", + lcore, + port); + } + + lp_worker->rings_out[port] = ring; + lp_io->tx.rings[port][lp_worker->worker_id] = ring; + } + } + + for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { + struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io; + uint32_t i; + + if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) || + (lp_io->tx.n_nic_ports == 0)) { + continue; + } + + for (i = 0; i < lp_io->tx.n_nic_ports; i ++){ + uint32_t port, j; + + port = lp_io->tx.nic_ports[i]; + for (j = 0; j < app_get_lcores_worker(); j ++) { + if (lp_io->tx.rings[port][j] == NULL) { + rte_panic("Algorithmic error (I/O TX rings)\n"); + } + } + } + } +} + +static void +app_init_nics(void) +{ + uint32_t socket, lcore; + uint8_t port, queue; + int ret; + + /* Init driver */ + printf("Initializing the PMD driver ...\n"); +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) { + rte_panic("Cannot init IGB PMD\n"); + } +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) { + rte_panic("Cannot init IXGBE PMD\n"); + } +#endif + if (rte_eal_pci_probe() < 0) { + rte_panic("Cannot probe PCI\n"); + } + + /* Init NIC ports and queues, then start the ports */ + for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { + struct rte_eth_link link; + struct rte_mempool *pool; + uint32_t n_rx_queues, n_tx_queues; + + n_rx_queues = app_get_nic_rx_queues_per_port(port); + n_tx_queues = app.nic_tx_port_mask[port]; + + if ((n_rx_queues == 0) && (n_tx_queues == 0)) { + continue; + } + + /* Init port */ + printf("Initializing NIC port %u ...\n", (uint32_t) port); + ret = rte_eth_dev_configure( + port, + (uint8_t) n_rx_queues, + (uint8_t) n_tx_queues, + &port_conf); + if (ret < 0) { + rte_panic("Cannot init NIC port %u (%d)\n", (uint32_t) port, ret); + } + rte_eth_promiscuous_enable(port); + + /* Init RX queues */ + for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) { + if (app.nic_rx_queue_mask[port][queue] == 0) { + continue; + } + + app_get_lcore_for_nic_rx(port, queue, &lcore); + socket = rte_lcore_to_socket_id(lcore); + pool = app.lcore_params[lcore].pool; + + printf("Initializing NIC port %u RX queue %u ...\n", + (uint32_t) port, + (uint32_t) queue); + ret = rte_eth_rx_queue_setup( + port, + queue, + (uint16_t) app.nic_rx_ring_size, + socket, + &rx_conf, + pool); + if (ret < 0) { + rte_panic("Cannot init RX queue %u for port %u (%d)\n", + (uint32_t) queue, + (uint32_t) port, + ret); + } + } + + /* Init TX queues */ + if (app.nic_tx_port_mask[port] == 1) { + app_get_lcore_for_nic_tx(port, &lcore); + socket = rte_lcore_to_socket_id(lcore); + printf("Initializing NIC port %u TX queue 0 ...\n", + (uint32_t) port); + ret = rte_eth_tx_queue_setup( + port, + 0, + (uint16_t) app.nic_tx_ring_size, + socket, + &tx_conf); + if (ret < 0) { + rte_panic("Cannot init TX queue 0 for port %d (%d)\n", + port, + ret); + } + } + + /* Start port */ + ret = rte_eth_dev_start(port); + if (ret < 0) { + rte_panic("Cannot start port %d (%d)\n", port, ret); + } + + /* Get link status */ + rte_eth_link_get(port, &link); + if (link.link_status) { + printf("Port %u is UP (%u Mbps)\n", + (uint32_t) port, + (unsigned) link.link_speed); + } else { + printf("Port %u is DOWN\n", + (uint32_t) port); + } + } +} + +void +app_init(void) +{ + 
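+ /*
+  * Top-level initialization: assign worker IDs, create the per-socket mbuf
+  * pools and LPM tables, create the software rings connecting I/O and worker
+  * lcores (RX and TX directions), then configure and start the NIC ports.
+  */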
app_assign_worker_ids(); + app_init_mbuf_pools(); + app_init_lpm_tables(); + app_init_rings_rx(); + app_init_rings_tx(); + app_init_nics(); + + printf("Initialization completed.\n"); +} diff --git a/examples/load_balancer/main.c b/examples/load_balancer/main.c new file mode 100644 index 0000000000..108211c89d --- /dev/null +++ b/examples/load_balancer/main.c @@ -0,0 +1,112 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +int +MAIN(int argc, char **argv) +{ + uint32_t lcore; + int ret; + + /* Init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + return -1; + argc -= ret; + argv += ret; + + /* Parse application arguments (after the EAL ones) */ + ret = app_parse_args(argc, argv); + if (ret < 0) { + app_print_usage(); + return -1; + } + + /* Init */ + app_init(); + app_print_params(); + + /* Launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore) { + if (rte_eal_wait_lcore(lcore) < 0) { + return -1; + } + } + + return 0; +} diff --git a/examples/load_balancer/main.h b/examples/load_balancer/main.h new file mode 100644 index 0000000000..650f7501f9 --- /dev/null +++ b/examples/load_balancer/main.h @@ -0,0 +1,377 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +/* Logical cores */ +#ifndef APP_MAX_SOCKETS +#define APP_MAX_SOCKETS 2 +#endif + +#ifndef APP_MAX_LCORES +#define APP_MAX_LCORES RTE_MAX_LCORE +#endif + +#ifndef APP_MAX_NIC_PORTS +#define APP_MAX_NIC_PORTS RTE_MAX_ETHPORTS +#endif + +#ifndef APP_MAX_RX_QUEUES_PER_NIC_PORT +#define APP_MAX_RX_QUEUES_PER_NIC_PORT 128 +#endif + +#ifndef APP_MAX_TX_QUEUES_PER_NIC_PORT +#define APP_MAX_TX_QUEUES_PER_NIC_PORT 128 +#endif + +#ifndef APP_MAX_IO_LCORES +#define APP_MAX_IO_LCORES 16 +#endif +#if (APP_MAX_IO_LCORES > APP_MAX_LCORES) +#error "APP_MAX_IO_LCORES is too big" +#endif + +#ifndef APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE +#define APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE 16 +#endif + +#ifndef APP_MAX_NIC_TX_PORTS_PER_IO_LCORE +#define APP_MAX_NIC_TX_PORTS_PER_IO_LCORE 16 +#endif +#if (APP_MAX_NIC_TX_PORTS_PER_IO_LCORE > APP_MAX_NIC_PORTS) +#error "APP_MAX_NIC_TX_PORTS_PER_IO_LCORE too big" +#endif + +#ifndef APP_MAX_WORKER_LCORES +#define APP_MAX_WORKER_LCORES 16 +#endif +#if (APP_MAX_WORKER_LCORES > APP_MAX_LCORES) +#error "APP_MAX_WORKER_LCORES is too big" +#endif + + +/* Mempools */ +#ifndef APP_DEFAULT_MBUF_SIZE +#define APP_DEFAULT_MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#endif + +#ifndef APP_DEFAULT_MEMPOOL_BUFFERS +#define APP_DEFAULT_MEMPOOL_BUFFERS 8192 +#endif + +#ifndef APP_DEFAULT_MEMPOOL_CACHE_SIZE +#define APP_DEFAULT_MEMPOOL_CACHE_SIZE 256 +#endif + +/* LPM Tables */ +#ifndef APP_MAX_LPM_RULES +#define APP_MAX_LPM_RULES 1024 +#endif + +/* NIC RX */ +#ifndef APP_DEFAULT_NIC_RX_RING_SIZE +#define APP_DEFAULT_NIC_RX_RING_SIZE 1024 +#endif + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. 
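+ * (PTHRESH, HTHRESH and WTHRESH map to the prefetch, host and write-back
+ * threshold fields of the controller's descriptor ring registers; a larger
+ * WTHRESH typically batches descriptor write-backs at the cost of latency.)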
+ */ +#ifndef APP_DEFAULT_NIC_RX_PTHRESH +#define APP_DEFAULT_NIC_RX_PTHRESH 8 +#endif + +#ifndef APP_DEFAULT_NIC_RX_HTHRESH +#define APP_DEFAULT_NIC_RX_HTHRESH 8 +#endif + +#ifndef APP_DEFAULT_NIC_RX_WTHRESH +#define APP_DEFAULT_NIC_RX_WTHRESH 4 +#endif + +#ifndef APP_DEFAULT_NIC_RX_FREE_THRESH +#define APP_DEFAULT_NIC_RX_FREE_THRESH 64 +#endif + +/* NIC TX */ +#ifndef APP_DEFAULT_NIC_TX_RING_SIZE +#define APP_DEFAULT_NIC_TX_RING_SIZE 1024 +#endif + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +#ifndef APP_DEFAULT_NIC_TX_PTHRESH +#define APP_DEFAULT_NIC_TX_PTHRESH 36 +#endif + +#ifndef APP_DEFAULT_NIC_TX_HTHRESH +#define APP_DEFAULT_NIC_TX_HTHRESH 0 +#endif + +#ifndef APP_DEFAULT_NIC_TX_WTHRESH +#define APP_DEFAULT_NIC_TX_WTHRESH 0 +#endif + +#ifndef APP_DEFAULT_NIC_TX_FREE_THRESH +#define APP_DEFAULT_NIC_TX_FREE_THRESH 0 +#endif + +#ifndef APP_DEFAULT_NIC_TX_RS_THRESH +#define APP_DEFAULT_NIC_TX_RS_THRESH 0 +#endif + +/* Software Rings */ +#ifndef APP_DEFAULT_RING_RX_SIZE +#define APP_DEFAULT_RING_RX_SIZE 1024 +#endif + +#ifndef APP_DEFAULT_RING_TX_SIZE +#define APP_DEFAULT_RING_TX_SIZE 1024 +#endif + +/* Bursts */ +#ifndef APP_MBUF_ARRAY_SIZE +#define APP_MBUF_ARRAY_SIZE 512 +#endif + +#ifndef APP_DEFAULT_BURST_SIZE_IO_RX_READ +#define APP_DEFAULT_BURST_SIZE_IO_RX_READ 144 +#endif +#if (APP_DEFAULT_BURST_SIZE_IO_RX_READ > APP_MBUF_ARRAY_SIZE) +#error "APP_DEFAULT_BURST_SIZE_IO_RX_READ is too big" +#endif + +#ifndef APP_DEFAULT_BURST_SIZE_IO_RX_WRITE +#define APP_DEFAULT_BURST_SIZE_IO_RX_WRITE 144 +#endif +#if (APP_DEFAULT_BURST_SIZE_IO_RX_WRITE > APP_MBUF_ARRAY_SIZE) +#error "APP_DEFAULT_BURST_SIZE_IO_RX_WRITE is too big" +#endif + +#ifndef APP_DEFAULT_BURST_SIZE_IO_TX_READ +#define APP_DEFAULT_BURST_SIZE_IO_TX_READ 144 +#endif +#if (APP_DEFAULT_BURST_SIZE_IO_TX_READ > APP_MBUF_ARRAY_SIZE) +#error "APP_DEFAULT_BURST_SIZE_IO_TX_READ is too big" +#endif + +#ifndef APP_DEFAULT_BURST_SIZE_IO_TX_WRITE +#define APP_DEFAULT_BURST_SIZE_IO_TX_WRITE 144 +#endif +#if (APP_DEFAULT_BURST_SIZE_IO_TX_WRITE > APP_MBUF_ARRAY_SIZE) +#error "APP_DEFAULT_BURST_SIZE_IO_TX_WRITE is too big" +#endif + +#ifndef APP_DEFAULT_BURST_SIZE_WORKER_READ +#define APP_DEFAULT_BURST_SIZE_WORKER_READ 144 +#endif +#if ((2 * APP_DEFAULT_BURST_SIZE_WORKER_READ) > APP_MBUF_ARRAY_SIZE) +#error "APP_DEFAULT_BURST_SIZE_WORKER_READ is too big" +#endif + +#ifndef APP_DEFAULT_BURST_SIZE_WORKER_WRITE +#define APP_DEFAULT_BURST_SIZE_WORKER_WRITE 144 +#endif +#if (APP_DEFAULT_BURST_SIZE_WORKER_WRITE > APP_MBUF_ARRAY_SIZE) +#error "APP_DEFAULT_BURST_SIZE_WORKER_WRITE is too big" +#endif + +/* Load balancing logic */ +#ifndef APP_DEFAULT_IO_RX_LB_POS +#define APP_DEFAULT_IO_RX_LB_POS 29 +#endif +#if (APP_DEFAULT_IO_RX_LB_POS >= 64) +#error "APP_DEFAULT_IO_RX_LB_POS is too big" +#endif + +struct app_mbuf_array { + struct rte_mbuf *array[APP_MBUF_ARRAY_SIZE]; + uint32_t n_mbufs; +}; + +enum app_lcore_type { + e_APP_LCORE_DISABLED = 0, + e_APP_LCORE_IO, + e_APP_LCORE_WORKER +}; + +struct app_lcore_params_io { + /* I/O RX */ + struct { + /* NIC */ + struct { + uint8_t port; + uint8_t queue; + } nic_queues[APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE]; + uint32_t n_nic_queues; + + /* Rings */ + struct rte_ring *rings[APP_MAX_WORKER_LCORES]; + uint32_t n_rings; + + /* Internal buffers */ + struct app_mbuf_array mbuf_in; + struct app_mbuf_array mbuf_out[APP_MAX_WORKER_LCORES]; + uint8_t 
mbuf_out_flush[APP_MAX_WORKER_LCORES]; + + /* Stats */ + uint32_t nic_queues_count[APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE]; + uint32_t nic_queues_iters[APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE]; + uint32_t rings_count[APP_MAX_WORKER_LCORES]; + uint32_t rings_iters[APP_MAX_WORKER_LCORES]; + } rx; + + /* I/O TX */ + struct { + /* Rings */ + struct rte_ring *rings[APP_MAX_NIC_PORTS][APP_MAX_WORKER_LCORES]; + + /* NIC */ + uint8_t nic_ports[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE]; + uint32_t n_nic_ports; + + /* Internal buffers */ + struct app_mbuf_array mbuf_out[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE]; + uint8_t mbuf_out_flush[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE]; + + /* Stats */ + uint32_t rings_count[APP_MAX_NIC_PORTS][APP_MAX_WORKER_LCORES]; + uint32_t rings_iters[APP_MAX_NIC_PORTS][APP_MAX_WORKER_LCORES]; + uint32_t nic_ports_count[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE]; + uint32_t nic_ports_iters[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE]; + } tx; +}; + +struct app_lcore_params_worker { + /* Rings */ + struct rte_ring *rings_in[APP_MAX_IO_LCORES]; + uint32_t n_rings_in; + struct rte_ring *rings_out[APP_MAX_NIC_PORTS]; + + /* LPM table */ + struct rte_lpm *lpm_table; + uint32_t worker_id; + + /* Internal buffers */ + struct app_mbuf_array mbuf_in; + struct app_mbuf_array mbuf_out[APP_MAX_NIC_PORTS]; + uint8_t mbuf_out_flush[APP_MAX_NIC_PORTS]; + + /* Stats */ + uint32_t rings_in_count[APP_MAX_IO_LCORES]; + uint32_t rings_in_iters[APP_MAX_IO_LCORES]; + uint32_t rings_out_count[APP_MAX_NIC_PORTS]; + uint32_t rings_out_iters[APP_MAX_NIC_PORTS]; +}; + +struct app_lcore_params { + union { + struct app_lcore_params_io io; + struct app_lcore_params_worker worker; + }; + enum app_lcore_type type; + struct rte_mempool *pool; +} __rte_cache_aligned; + +struct app_lpm_rule { + uint32_t ip; + uint8_t depth; + uint8_t if_out; +}; + +struct app_params { + /* lcore */ + struct app_lcore_params lcore_params[APP_MAX_LCORES]; + + /* NIC */ + uint8_t nic_rx_queue_mask[APP_MAX_NIC_PORTS][APP_MAX_RX_QUEUES_PER_NIC_PORT]; + uint8_t nic_tx_port_mask[APP_MAX_NIC_PORTS]; + + /* mbuf pools */ + struct rte_mempool *pools[APP_MAX_SOCKETS]; + + /* LPM tables */ + struct rte_lpm *lpm_tables[APP_MAX_SOCKETS]; + struct app_lpm_rule lpm_rules[APP_MAX_LPM_RULES]; + uint32_t n_lpm_rules; + + /* rings */ + uint32_t nic_rx_ring_size; + uint32_t nic_tx_ring_size; + uint32_t ring_rx_size; + uint32_t ring_tx_size; + + /* burst size */ + uint32_t burst_size_io_rx_read; + uint32_t burst_size_io_rx_write; + uint32_t burst_size_io_tx_read; + uint32_t burst_size_io_tx_write; + uint32_t burst_size_worker_read; + uint32_t burst_size_worker_write; + + /* load balancing */ + uint8_t pos_lb; +} __rte_cache_aligned; + +extern struct app_params app; + +int app_parse_args(int argc, char **argv); +void app_print_usage(void); +void app_init(void); +int app_lcore_main_loop(void *arg); + +int app_get_nic_rx_queues_per_port(uint8_t port); +int app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out); +int app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out); +int app_is_socket_used(uint32_t socket); +uint32_t app_get_lcores_io_rx(void); +uint32_t app_get_lcores_worker(void); +void app_print_params(void); + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c new file mode 100644 index 0000000000..d349df3fa2 --- /dev/null +++ b/examples/load_balancer/runtime.c @@ -0,0 +1,669 @@ +/*- + 
* BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +#ifndef APP_LCORE_IO_FLUSH +#define APP_LCORE_IO_FLUSH 1000000 +#endif + +#ifndef APP_LCORE_WORKER_FLUSH +#define APP_LCORE_WORKER_FLUSH 1000000 +#endif + +#ifndef APP_STATS +#define APP_STATS 1000000 +#endif + +#define APP_IO_RX_DROP_ALL_PACKETS 0 +#define APP_WORKER_DROP_ALL_PACKETS 0 +#define APP_IO_TX_DROP_ALL_PACKETS 0 + +#ifndef APP_IO_RX_PREFETCH_ENABLE +#define APP_IO_RX_PREFETCH_ENABLE 1 +#endif + +#ifndef APP_WORKER_PREFETCH_ENABLE +#define APP_WORKER_PREFETCH_ENABLE 1 +#endif + +#ifndef APP_IO_TX_PREFETCH_ENABLE +#define APP_IO_TX_PREFETCH_ENABLE 1 +#endif + +#if APP_IO_RX_PREFETCH_ENABLE +#define APP_IO_RX_PREFETCH0(p) rte_prefetch0(p) +#define APP_IO_RX_PREFETCH1(p) rte_prefetch1(p) +#else +#define APP_IO_RX_PREFETCH0(p) +#define APP_IO_RX_PREFETCH1(p) +#endif + +#if APP_WORKER_PREFETCH_ENABLE +#define APP_WORKER_PREFETCH0(p) rte_prefetch0(p) +#define APP_WORKER_PREFETCH1(p) rte_prefetch1(p) +#else +#define APP_WORKER_PREFETCH0(p) +#define APP_WORKER_PREFETCH1(p) +#endif + +#if APP_IO_TX_PREFETCH_ENABLE +#define APP_IO_TX_PREFETCH0(p) rte_prefetch0(p) +#define APP_IO_TX_PREFETCH1(p) rte_prefetch1(p) +#else +#define APP_IO_TX_PREFETCH0(p) +#define APP_IO_TX_PREFETCH1(p) +#endif + +static inline void +app_lcore_io_rx_buffer_to_send ( + struct app_lcore_params_io *lp, + uint32_t worker, + struct rte_mbuf *mbuf, + uint32_t bsz) +{ + uint32_t pos; + int ret; + + pos = lp->rx.mbuf_out[worker].n_mbufs; + lp->rx.mbuf_out[worker].array[pos ++] = 
mbuf; + if (likely(pos < bsz)) { + lp->rx.mbuf_out[worker].n_mbufs = pos; + return; + } + + ret = rte_ring_sp_enqueue_bulk( + lp->rx.rings[worker], + (void **) lp->rx.mbuf_out[worker].array, + bsz); + + if (unlikely(ret == -ENOBUFS)) { + uint32_t k; + for (k = 0; k < bsz; k ++) { + struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k]; + rte_pktmbuf_free(m); + } + } + + lp->rx.mbuf_out[worker].n_mbufs = 0; + lp->rx.mbuf_out_flush[worker] = 0; + +#if APP_STATS + lp->rx.rings_iters[worker] ++; + if (likely(ret == 0)) { + lp->rx.rings_count[worker] ++; + } + if (unlikely(lp->rx.rings_iters[worker] == APP_STATS)) { + uint32_t lcore = rte_lcore_id(); + + printf("\tI/O RX %u out (worker %u): enq success rate = %.2f\n", + lcore, + worker, + ((double) lp->rx.rings_count[worker]) / ((double) lp->rx.rings_iters[worker])); + lp->rx.rings_iters[worker] = 0; + lp->rx.rings_count[worker] = 0; + } +#endif +} + +static inline void +app_lcore_io_rx( + struct app_lcore_params_io *lp, + uint32_t n_workers, + uint32_t bsz_rd, + uint32_t bsz_wr, + uint8_t pos_lb) +{ + struct rte_mbuf *mbuf_1_0, *mbuf_1_1, *mbuf_2_0, *mbuf_2_1; + uint8_t *data_1_0, *data_1_1; + uint32_t i; + + for (i = 0; i < lp->rx.n_nic_queues; i ++) { + uint8_t port = lp->rx.nic_queues[i].port; + uint8_t queue = lp->rx.nic_queues[i].queue; + uint32_t n_mbufs, j; + + n_mbufs = rte_eth_rx_burst( + port, + queue, + lp->rx.mbuf_in.array, + (uint16_t) bsz_rd); + + if (unlikely(n_mbufs == 0)) { + continue; + } + +#if APP_STATS + lp->rx.nic_queues_iters[i] ++; + lp->rx.nic_queues_count[i] += n_mbufs; + if (unlikely(lp->rx.nic_queues_iters[i] == APP_STATS)) { + struct rte_eth_stats stats; + uint32_t lcore = rte_lcore_id(); + + rte_eth_stats_get(port, &stats); + + printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n", + lcore, + (uint32_t) port, + (double) stats.ierrors / (double) (stats.ierrors + stats.ipackets), + ((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i])); + lp->rx.nic_queues_iters[i] = 0; + lp->rx.nic_queues_count[i] = 0; + } +#endif + +#if APP_IO_RX_DROP_ALL_PACKETS + for (j = 0; j < n_mbufs; j ++) { + struct rte_mbuf *pkt = lp->rx.mbuf_in.array[j]; + rte_pktmbuf_free(pkt); + } + + continue; +#endif + + mbuf_1_0 = lp->rx.mbuf_in.array[0]; + mbuf_1_1 = lp->rx.mbuf_in.array[1]; + data_1_0 = rte_pktmbuf_mtod(mbuf_1_0, uint8_t *); + if (likely(n_mbufs > 1)) { + data_1_1 = rte_pktmbuf_mtod(mbuf_1_1, uint8_t *); + } + + mbuf_2_0 = lp->rx.mbuf_in.array[2]; + mbuf_2_1 = lp->rx.mbuf_in.array[3]; + APP_IO_RX_PREFETCH0(mbuf_2_0); + APP_IO_RX_PREFETCH0(mbuf_2_1); + + for (j = 0; j + 3 < n_mbufs; j += 2) { + struct rte_mbuf *mbuf_0_0, *mbuf_0_1; + uint8_t *data_0_0, *data_0_1; + uint32_t worker_0, worker_1; + + mbuf_0_0 = mbuf_1_0; + mbuf_0_1 = mbuf_1_1; + data_0_0 = data_1_0; + data_0_1 = data_1_1; + + mbuf_1_0 = mbuf_2_0; + mbuf_1_1 = mbuf_2_1; + data_1_0 = rte_pktmbuf_mtod(mbuf_2_0, uint8_t *); + data_1_1 = rte_pktmbuf_mtod(mbuf_2_1, uint8_t *); + APP_IO_RX_PREFETCH0(data_1_0); + APP_IO_RX_PREFETCH0(data_1_1); + + mbuf_2_0 = lp->rx.mbuf_in.array[j+4]; + mbuf_2_1 = lp->rx.mbuf_in.array[j+5]; + APP_IO_RX_PREFETCH0(mbuf_2_0); + APP_IO_RX_PREFETCH0(mbuf_2_1); + + worker_0 = data_0_0[pos_lb] & (n_workers - 1); + worker_1 = data_0_1[pos_lb] & (n_workers - 1); + + app_lcore_io_rx_buffer_to_send(lp, worker_0, mbuf_0_0, bsz_wr); + app_lcore_io_rx_buffer_to_send(lp, worker_1, mbuf_0_1, bsz_wr); + } + + /* Handle the last 1, 2 (when n_mbufs is even) or 3 (when n_mbufs is odd) packets */ + for ( ; j < 
n_mbufs; j += 1) { + struct rte_mbuf *mbuf; + uint8_t *data; + uint32_t worker; + + mbuf = mbuf_1_0; + mbuf_1_0 = mbuf_1_1; + mbuf_1_1 = mbuf_2_0; + mbuf_2_0 = mbuf_2_1; + + data = rte_pktmbuf_mtod(mbuf, uint8_t *); + + APP_IO_RX_PREFETCH0(mbuf_1_0); + + worker = data[pos_lb] & (n_workers - 1); + + app_lcore_io_rx_buffer_to_send(lp, worker, mbuf, bsz_wr); + } + } +} + +static inline void +app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers) +{ + uint32_t worker; + + for (worker = 0; worker < n_workers; worker ++) { + int ret; + + if (likely((lp->rx.mbuf_out_flush[worker] == 0) || + (lp->rx.mbuf_out[worker].n_mbufs == 0))) { + lp->rx.mbuf_out_flush[worker] = 1; + continue; + } + + ret = rte_ring_sp_enqueue_bulk( + lp->rx.rings[worker], + (void **) lp->rx.mbuf_out[worker].array, + lp->rx.mbuf_out[worker].n_mbufs); + + if (unlikely(ret < 0)) { + uint32_t k; + for (k = 0; k < lp->rx.mbuf_out[worker].n_mbufs; k ++) { + struct rte_mbuf *pkt_to_free = lp->rx.mbuf_out[worker].array[k]; + rte_pktmbuf_free(pkt_to_free); + } + } + + lp->rx.mbuf_out[worker].n_mbufs = 0; + lp->rx.mbuf_out_flush[worker] = 1; + } +} + +static inline void +app_lcore_io_tx( + struct app_lcore_params_io *lp, + uint32_t n_workers, + uint32_t bsz_rd, + uint32_t bsz_wr) +{ + uint32_t worker; + + for (worker = 0; worker < n_workers; worker ++) { + uint32_t i; + + for (i = 0; i < lp->tx.n_nic_ports; i ++) { + uint8_t port = lp->tx.nic_ports[i]; + struct rte_ring *ring = lp->tx.rings[port][worker]; + uint32_t n_mbufs, n_pkts; + int ret; + + n_mbufs = lp->tx.mbuf_out[port].n_mbufs; + ret = rte_ring_sc_dequeue_bulk( + ring, + (void **) &lp->tx.mbuf_out[port].array[n_mbufs], + bsz_rd); + + if (unlikely(ret == -ENOENT)) { + continue; + } + + n_mbufs += bsz_rd; + +#if APP_IO_TX_DROP_ALL_PACKETS + { + uint32_t j; + APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[0]); + APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[1]); + + for (j = 0; j < n_mbufs; j ++) { + if (likely(j < n_mbufs - 2)) { + APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[j + 2]); + } + + rte_pktmbuf_free(lp->tx.mbuf_out[port].array[j]); + } + + lp->tx.mbuf_out[port].n_mbufs = 0; + + continue; + } +#endif + + if (unlikely(n_mbufs < bsz_wr)) { + lp->tx.mbuf_out[port].n_mbufs = n_mbufs; + continue; + } + + n_pkts = rte_eth_tx_burst( + port, + 0, + lp->tx.mbuf_out[port].array, + (uint16_t) n_mbufs); + +#if APP_STATS + lp->tx.nic_ports_iters[port] ++; + lp->tx.nic_ports_count[port] += n_pkts; + if (unlikely(lp->tx.nic_ports_iters[port] == APP_STATS)) { + uint32_t lcore = rte_lcore_id(); + + printf("\t\t\tI/O TX %u out (port %u): avg burst size = %.2f\n", + lcore, + (uint32_t) port, + ((double) lp->tx.nic_ports_count[port]) / ((double) lp->tx.nic_ports_iters[port])); + lp->tx.nic_ports_iters[port] = 0; + lp->tx.nic_ports_count[port] = 0; + } +#endif + + if (unlikely(n_pkts < n_mbufs)) { + uint32_t k; + for (k = n_pkts; k < n_mbufs; k ++) { + struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k]; + rte_pktmbuf_free(pkt_to_free); + } + } + lp->tx.mbuf_out[port].n_mbufs = 0; + lp->tx.mbuf_out_flush[port] = 0; + } + } +} + +static inline void +app_lcore_io_tx_flush(struct app_lcore_params_io *lp) +{ + uint8_t port; + + for (port = 0; port < lp->tx.n_nic_ports; port ++) { + uint32_t n_pkts; + + if (likely((lp->tx.mbuf_out_flush[port] == 0) || + (lp->tx.mbuf_out[port].n_mbufs == 0))) { + lp->tx.mbuf_out_flush[port] = 1; + continue; + } + + n_pkts = rte_eth_tx_burst( + port, + 0, + lp->tx.mbuf_out[port].array, + (uint16_t) 
lp->tx.mbuf_out[port].n_mbufs); + + if (unlikely(n_pkts < lp->tx.mbuf_out[port].n_mbufs)) { + uint32_t k; + for (k = n_pkts; k < lp->tx.mbuf_out[port].n_mbufs; k ++) { + struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k]; + rte_pktmbuf_free(pkt_to_free); + } + } + + lp->tx.mbuf_out[port].n_mbufs = 0; + lp->tx.mbuf_out_flush[port] = 1; + } +} + +static void +app_lcore_main_loop_io(void) +{ + uint32_t lcore = rte_lcore_id(); + struct app_lcore_params_io *lp = &app.lcore_params[lcore].io; + uint32_t n_workers = app_get_lcores_worker(); + uint64_t i = 0; + + uint32_t bsz_rx_rd = app.burst_size_io_rx_read; + uint32_t bsz_rx_wr = app.burst_size_io_rx_write; + uint32_t bsz_tx_rd = app.burst_size_io_tx_read; + uint32_t bsz_tx_wr = app.burst_size_io_tx_write; + + uint8_t pos_lb = app.pos_lb; + + for ( ; ; ) { + if (APP_LCORE_IO_FLUSH && (unlikely(i == APP_LCORE_IO_FLUSH))) { + if (likely(lp->rx.n_nic_queues > 0)) { + app_lcore_io_rx_flush(lp, n_workers); + } + + if (likely(lp->tx.n_nic_ports > 0)) { + app_lcore_io_tx_flush(lp); + } + + i = 0; + } + + if (likely(lp->rx.n_nic_queues > 0)) { + app_lcore_io_rx(lp, n_workers, bsz_rx_rd, bsz_rx_wr, pos_lb); + } + + if (likely(lp->tx.n_nic_ports > 0)) { + app_lcore_io_tx(lp, n_workers, bsz_tx_rd, bsz_tx_wr); + } + + i ++; + } +} + +static inline void +app_lcore_worker( + struct app_lcore_params_worker *lp, + uint32_t bsz_rd, + uint32_t bsz_wr) +{ + uint32_t i; + + for (i = 0; i < lp->n_rings_in; i ++) { + struct rte_ring *ring_in = lp->rings_in[i]; + uint32_t j; + int ret; + + ret = rte_ring_sc_dequeue_bulk( + ring_in, + (void **) lp->mbuf_in.array, + bsz_rd); + + if (unlikely(ret == -ENOENT)) { + continue; + } + +#if APP_WORKER_DROP_ALL_PACKETS + for (j = 0; j < bsz_rd; j ++) { + struct rte_mbuf *pkt = lp->mbuf_in.array[j]; + rte_pktmbuf_free(pkt); + } + + continue; +#endif + + APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[0], unsigned char *)); + APP_WORKER_PREFETCH0(lp->mbuf_in.array[1]); + + for (j = 0; j < bsz_rd; j ++) { + struct rte_mbuf *pkt; + struct ipv4_hdr *ipv4_hdr; + uint32_t ipv4_dst, pos; + uint8_t port; + + if (likely(j < bsz_rd - 1)) { + APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[j+1], unsigned char *)); + } + if (likely(j < bsz_rd - 2)) { + APP_WORKER_PREFETCH0(lp->mbuf_in.array[j+2]); + } + + pkt = lp->mbuf_in.array[j]; + ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, unsigned char *) + sizeof(struct ether_hdr)); + ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr); + + if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) { + port = pkt->pkt.in_port; + } + + pos = lp->mbuf_out[port].n_mbufs; + + lp->mbuf_out[port].array[pos ++] = pkt; + if (likely(pos < bsz_wr)) { + lp->mbuf_out[port].n_mbufs = pos; + continue; + } + + ret = rte_ring_sp_enqueue_bulk( + lp->rings_out[port], + (void **) lp->mbuf_out[port].array, + bsz_wr); + +#if APP_STATS + lp->rings_out_iters[port] ++; + if (ret == 0) { + lp->rings_out_count[port] += 1; + } + if (lp->rings_out_iters[port] == APP_STATS){ + printf("\t\tWorker %u out (NIC port %u): enq success rate = %.2f\n", + lp->worker_id, + (uint32_t) port, + ((double) lp->rings_out_count[port]) / ((double) lp->rings_out_iters[port])); + lp->rings_out_iters[port] = 0; + lp->rings_out_count[port] = 0; + } +#endif + + if (unlikely(ret == -ENOBUFS)) { + uint32_t k; + for (k = 0; k < bsz_wr; k ++) { + struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k]; + rte_pktmbuf_free(pkt_to_free); + } + } + + lp->mbuf_out[port].n_mbufs = 0; + lp->mbuf_out_flush[port] = 0; + } 
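+ /* Burst from this input ring fully processed; continue with the next ring served by this worker. */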
+ } +} + +static inline void +app_lcore_worker_flush(struct app_lcore_params_worker *lp) +{ + uint32_t port; + + for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { + int ret; + + if (unlikely(lp->rings_out[port] == NULL)) { + continue; + } + + if (likely((lp->mbuf_out_flush[port] == 0) || + (lp->mbuf_out[port].n_mbufs == 0))) { + lp->mbuf_out_flush[port] = 1; + continue; + } + + ret = rte_ring_sp_enqueue_bulk( + lp->rings_out[port], + (void **) lp->mbuf_out[port].array, + lp->mbuf_out[port].n_mbufs); + + if (unlikely(ret < 0)) { + uint32_t k; + for (k = 0; k < lp->mbuf_out[port].n_mbufs; k ++) { + struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k]; + rte_pktmbuf_free(pkt_to_free); + } + } + + lp->mbuf_out[port].n_mbufs = 0; + lp->mbuf_out_flush[port] = 1; + } +} + +static void +app_lcore_main_loop_worker(void) { + uint32_t lcore = rte_lcore_id(); + struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker; + uint64_t i = 0; + + uint32_t bsz_rd = app.burst_size_worker_read; + uint32_t bsz_wr = app.burst_size_worker_write; + + for ( ; ; ) { + if (APP_LCORE_WORKER_FLUSH && (unlikely(i == APP_LCORE_WORKER_FLUSH))) { + app_lcore_worker_flush(lp); + i = 0; + } + + app_lcore_worker(lp, bsz_rd, bsz_wr); + + i ++; + } +} + +int +app_lcore_main_loop(__attribute__((unused)) void *arg) +{ + struct app_lcore_params *lp; + uint32_t lcore; + + lcore = rte_lcore_id(); + lp = &app.lcore_params[lcore]; + + if (lp->type == e_APP_LCORE_IO) { + printf("Logical core %u (I/O) main loop.\n", lcore); + app_lcore_main_loop_io(); + } + + if (lp->type == e_APP_LCORE_WORKER) { + printf("Logical core %u (worker %u) main loop.\n", + lcore, + lp->worker.worker_id); + app_lcore_main_loop_worker(); + } + + return 0; +} diff --git a/examples/multi_process/482253_Multi_Process_Sample_App_Guide_Rev1.3.pdf b/examples/multi_process/482253_Multi_Process_Sample_App_Guide_Rev1.3.pdf new file mode 100644 index 0000000000000000000000000000000000000000..ec041da66d51890d8294cb0fffaa21f80d9c6516 GIT binary patch literal 144607

    VOh-|meNaudRx zBZZE{c?nO69S_H)B2lxO=47oZc9Ygc=(RE_;*Hf05QO$f74?Rtgb6L-*yMldtt+=d zxxokiVCZ*qq%-G3febn72~kp-I4?R+=i4sb#(lcUyn-?gq)jET6t3lUwm7#P8BP+- z71Ck5-~$ZD;ZX#!k@**#hNqU?q3*Sfgv&3kZok+@ZjBctwu?93^Wu2ZbGN$Ka{nB zgM|3oFBGYdA25S7c$23Wv{k?r2b#c(?z1%Y0BU9><~AEEY@vX`P|dtg*m4@dt%N?K>Gqct-=2&)4AFFdNo2A zxc?Q;(7E|C7~G7weGlQ=-CJ zyo`msif>`zHSFaF-gGaKg;CYHwCbe)QD=8#gTI){*dKU-3o&Y4r&QQ$33pZ)9SLHye zKnsn8NRjyMmqlLP3=cCEZJJ)5l%OYkAv6C1yq4pj5tXP&ihUGrs~9SG7N0qi#W#44 zWwVs1!s2#sbn_(#yAjkzrBjC-EUZpKX4yOFtf3QcR$DC(9-jz@ah3aT2m53}z=$AP z1+j(ssJ_X`(6iy&B!?XBe3B`V#VnOFhbB7qYAf4dcV3O*#$|?Hg^M}m$Mp#S*uahq z6Fe0#$|%<#eK&6Gv1Pp2%@3ZtzL#CQFfB%%o}6V}ISzE5HHnq)T7ULKC#0{33M)1o zPN@xjI?%Fn-nNo+Ar&2;PMm>HPSFi-v=VMp&hd0$(9`1-_AyPCs z0Lpmen=$8;bp8xvfA;GCTD|EJD1domXp2_=nLm7p@YE{G>I{|DmRsHgf>orY6XY5w zF)KWGUCGsnsI@qB8@;Bb7yoTfy+fPN0fxZKs+jGoxP1i>;3I_@s35UADWaV-fey7C z>$cIsS>n2>T|s6@Ty4I-16DQ(z_3rAoQ{+K%VLsvhlrzR$Qto#67ireoI+{ zm%apNKj?}k3Sx2(UfycU8+%DE=X$-(TXooI7U28HQwTQtF;DVfRvyL3eeB4g+Rl<_ z;fqM`jcD(T2(WebeL}tsl-<<|W*NCDyWNb#R(kMa@Mj^DyKy#0IH6q=STPlj-jYjiAy+|b}`eXA2oUZrK z`OLr~OlMqBqN(ou`%ER+{YLqrJQoLKpFHenDJ5dmp~!wg!aa z4~b0h!O!JzL(HcnJclHvtEpw87boueX-f=x_u^3#A64gxPITX*ZTf7owdlhs*rA>yC~d3z0Ce|{177Oy4h-e6K&`=n-tSh>bcF$$7m zVK1ELojv}{d&7EBJ^(KAgwYElx9x;8?;J_8sQ5rp+q?B2F?Q~m%XX=@fw{OejDs4c z=iZX6uX7rI`sh68w>?>Mo{w1xcjll+*JMvH&GP8a;>6PCO!}oCJW?sIWR-a#y)nk8 z6gnlwnqKD{KL`NyTdzJWgD-NdYeESPq%J;KAFFU*}T+Hgn{m)*SAz_k1j-)`RfzC!HQ<}||T^}cyNAD0gJ-EK3Y zw;z#a?GkFFWOn=z%$jl)R>Tb%1EQ|Sr^-#`B+GkF=HIg)Y zX2=fn@J$yGBfBk;(zcK$6mg-<-?>pDT0+!|46P>u*Y#$2zS=iTrZc60;1x9H2C8Qt zwq4;8vTmr|V)1(Vb!&cU{i_bqe1~ECAD9D%|2<9gf2f0h8639%P8}qxU)pYsp#1d8 zF@QrRGdD4xN-|nNl{Fl+DIkvSG?7;b=Cx@@z@%#bVo)Vfg-|n=s0beVogxvTU3oTUcsC&wAqwTuFm)ev-nwr! zE66M&q*6|_+$Z*-LiXzqJ2k^1tBC1iD`%oX%;rdn7+U96GgNXZ3MM4+MNlkyBMXr+ zIk;mmFWCiD=T#*r+PavV(C(nk0BqkRjbH*)$n4g?RXS zq6%5;0Mhwas>&HCj{9(XFp7Kp-u%5yB@$M>w10DHF9w zEhEv><6tkVT#At*5!(*Bv6|GwFe-3t$pmPVvG_bWq!|(sn^_zgRwaqwRKk?{DFKw* z4D@IKk{p6T-(rnn`N*}PUO{W2*WW7@2}aI>C|X83C#YOGoxV;1?70cmIN0<#42^!> z9DLAK-OQN~fuTXhRJvdI?k{N5)E;|CscAa84XkqqL#&|H;K5>PfN z{wBl`)f$ZhGUX*uXRu@vbJ!HviZcIxT-Y0boic%l>4R`Tn3uP?KEYrEPKuK7l)!ut znsFt_e=!C?&6M`Rs>u1taeT<;j&A_kjE6Yy{CP=*{APW8G7p4Y-hdJ-b^H+RM=UvPGi!_`U$^zV(PC z`JEui+?%xqAgLz&Oh`~DRhXF%`4hX6jKE)P73g)EXI}NPB+v)}&HN6T{$BJI#4!Re zXkv8Z&|<2AIWXsfD4vK0sR@a*N_dLov8@l3tebyQ)bRQFIFFDd7Uc4M6j?%<5^a6i zY{WvC3b;kKLm=57fB)4;0~x>eJ;snfBJlgy@>Ho;?w*A7?F|8W(;{nEs{t`&X?%8a zqqh9swrlP-DqDaay$CPekC;pE78`*L|Q{|Ezr)sP%esKa`!TsrUi^r6-$ZO32y^QoXJqRc6|Tk}x{AKQwEB7V3 z>|MEFOqqw|lCnMe_XI}(Z5WbVg^XEr(PjaZJ5~dkBp80nfeymaP?j{riZOg$NoMQG z{HI`oaIk^I096NO2vAHWSnqxIC*|@)>SEkTmQ!@|VxYUP2}Y}4(#u9>Ckyemc3+g@Ya-j~7IIx4bGZAu^YYS`pd zAU@|thBB<^rJngOcKqh zGOKNA`M(H@7}J1_pW}Q5YTK-$CUF;O9b!u(zggd)&(kMUA!wLJXx`U~%0_3i(S7(I zcHEz<_548r#>rw0&^96I6!b$t!%>b}k*hZA!|aI52O>B|+~GH0h1Y6Qy%&Z{5!4%z zcA3@s&Gbmfib=UXU@2sZ?T{m}@(4;d>I!UZ`6DH@KD ztzCRCm-bopab)%i-vV8|${9}5#r{cA)Bc3e#CX7%HP8?p|D-6a(n6^EI-(`rL3E7n zx0f*`#YLAyE~?swz@Lt-xj8`F+#az+I2m{{;E%cf!}1B7EZpvhgwgq$%ZwXQYr9K0 z`qo#A{>Zr@D+&#Tfir}Kzf5NEML~HQ)Jlj8N}*%o_{thhJ1*2^zG}2;{IFS3EdgJa zoW1!r9owCb&4?~;t5u)28g83(-L!gAwUVrDA!y|O$bZ7T!ri3C4(eUMWQk{M-DJ4@ zdPGL5hnofdEbLN zP_@!}kBJijEi?)OJK%l-WO`SiyGZg%9siCA8P~=Z_R-cLPPk79<0L|{KsJt zz}b<2js=Ef#Q6qxJ*2A-w;^>-m=s~vjtY~d z!e$BtDbo9SqIzsTy&j%RYPwEUFL7Bb&R`So`TK@T^b`?e3xbePul6f)S0mV8g26~| z`@t%}FT6Lv;4Xq3g2;@y|B2E|qP6f0ST55k5BPBz=<}3Xmg2OQz9FH$>!ad{u?crN z5s?n_uTS~>hhJ@%^5J<8`IlRJoJ__AC^u8`WOD5SMOzQ+!Jg~T zJz?nr$J_pd+<_2@fz%ikN%;(a=*2|p?9)&u`891<%C7QZVy8j8-VE`NQUY@ zV1#TQofn31dHzVp3u1?u&6XeBe=97&0Q!a!Jga^wsIj}D 
zJ*-s^S#OkBqK?0JK7k_)iP;O&0@QPZ^9bCmxXQR-{5Yt(>hQe`YZgeFoVotgm_+&Ib5sG5BCY6y}o!Gq+16&nl_0BI6 z@kF{*ti}+Cr6L9p(eI6BWyx510x&@5?flqQt=f}`s1#iD1*Q~pO{v_Rd<#aD6Ul;< zniy`mRSS@56tKwGv?}b_umPUsP~*A)chT66l+oG=F-`ZK1OKC5PA}D@(tCn9g9c!~ zZUeh0xV37-_1%7n@y}cDRECHrg?MbA3D1c)F@VG&*rvHUi96Y{<+4np>qjgg9hs7i zSLf4(q4Z9 z2nT5Q2t0py2a-}tJLBu_la=G+^!##={QCR+#rU9@!GZqkHO2yW4%aPa7K$WL zyBzWMQyLY8RMwvhW@@za5pl{kp|<4;^F3BC*Ea90eoYg;IHnax9Gt&Xy(5}CzS-kP z2&1)g$KO5~p8RPHRFP#_(Cfmxp7yE{_|+Cw3_VB`N)%k6QKOj?*xH)KOES-l?>BQ- z2@W+g$yZVE+6wcgE1G!ts6G#@d&aZTautmWrBU`G<4`r)uEZMWxol|GDALP{QaSZ; zzvgM$YR82aY8a(!j#ju^omnMlKe?CG=<}Y&`XfBEa{I%wvs{o)Yxa9wrn&Zu+GmFv z)gYH|f1k4C&ca7b50=%b5lik%VoLY^=5}URyPL!Bb8uL$QH{^r&*kN{G3v3^=!AD3 zZWNwAY9VZoe+54W<+0ngcKQjpBn;x40NoGjUn2>U%mlTzcuE{w}8z_QGs3Z(eX)!&D1$NAOw4mchj z;apeMwwD*D_>KVOP{6&8q{#+HLJ?yD86#QHJ6g;6UDpl})&*D81Kp+(KBcTQ#GjbQRwT31ZD*HL$= z8AX}FcAQr&6jz8DSutWs7U#EI(R8bh9DuuK#(Ur(`%VP9wZctQ4b5i7c^RGg!=YaJ zgvR`{4aQYZS8SD2P6nB2a6Wp7cph_J8Bk|G z+=T=L1Hdo_2|bf1im0-~`8b2ldVsdq0Z9r%t8=BnQQ?R+v>25>Z#~^7@a@2%@(Pa+)#4geTZ^=tGhQ*8UTE0Y@>iLi8RcD2*Br z5BmDqU)t$dY!<-r0#AT5kVgm9hX*3?UXMVx z$Dv@a9;>P%_kqD5VvjAt|0DBesTDhTAxc>@8lz=J%M{g@q5EkVHfizXR#Kh z0CKEXkOn2S<-A^F_0KyfsGKeY!Osh+ydv&Pm4Ue22}vvb;4WMcD8@`8SEO$pLSmp~ zssJJmEREuqCryM&(N&@W;2u7{zrRpES5e|PKFxo~?6($IG*%WAmP(iua5{Hc6 zK#0MQ0#A@s#MOY3wS^z^b$HUpEA_wFk!31LhGXUaD-CebJ-{&uZoLUmk zBUFJAxwsz$wg_KlR*nIr8-oJ`f6=;kFoWFyNL#tV@|{|rLh(d80k;F&EIF-PswW&m z&kw$#Jv`kIYaOir)5UX(33#7hm%y0cln|({h`g-gEL@AA;7>+EHb*f2=O=)5o9N1^ zK~6QBQW*R+U`Q2i0m74TuOEmDpyOX5UWhi95o>;A@i}t#GR;LPU;4?Gf3_-uo<$ak!*vTTb!P%-ve4}@w?}gQ zifmpF_QRyoN%3}5MjdoTHsDMOVM0O4F6~={37{tS_cZ#LDYkI~5NL?B%A!mlv7@bK zyxsw(Bw586o{q({`BWZqpM02Tf1KZrJtG+u^$^Azf&B?YNh3k&b11_Kx`~0|-uO7Z zxoZ4_kj_xLv&;z={wk6bih!Q(fcj2X=_N1jpT0ZYPp9)L8s=*gjdR7g^I{h{S8H-( z;r7>9ZLB$Xc~d+B`7Ph_crv4}ejy>e1BtcClznGmlNkPf@hmQBe%&~*)Y6e>pb5Q= z-eX^KkLtMw2%?B|smXS1f}3tRc*KAFbDnN|(ggCUJkcg8HXv)9`}^`pu8&=hkxcgu zJCvhOPSz)oBS7Hc*7>0oIGwlj9)RGiSP*}U*8mL3wNd%`U=-Otx9cruxF3XwG#dCW zR~}j|h;Y>KA&?y(DU8xiiMbF2vv410xoxPZ1Zaun?%fDdE#<^|i^X~grBQkb1p?hA z+qU##^xZkVK#nxVbG|68BGGM{yZD|;qnKHd%niz;2idW*C)Wn$GE}2vA!P~4D%?p1 zs)#cRJ*ZJ`$@mZIVSLqc<~bowD*X*CH3G74RZmZ6=Iq6}Yl|kM9wx1m*dmE%H!y5~ z&x7k?)v{aXy2{|nf;YN1*~Z!mxAYeE=Vj-!HLRat4?#Nbcr(JwwSmEHgvib?0mKkq zf)ir<%L$20l&vOuDa-Ti%oSrswG5g#BQ;bw!2_wn>>>kULRHGDykC2^%qiHp0EM+Y zAZyxA5p5>`|J19Qy-2BaO{Pm@71pz7Yn%0oUIAhBeQcG*y6ARuf^3wfz=rlNr_U#SO_3`6qs`unFa%f2)>g#L zCuiKv-69B}F!$j;nUV)$2XIkP}1=321Hm23x9W@boMYl!^}@0jo2z z?6i?QWbsS)J7q~srn8GBB_2RG>`%4GEP#)5cOt0SC- zE4=^;0IZqPip~{Btdbo$)ok+)7u+7yb}&95f}<3kK`)A_&oX=6VBCg4sYbKH%X{ao2=Ybm1(;%nt77NYp$Rp`Bl1%vM z1HRgh1~4Hhm4~6ioCM&ttOvH%9qD$o_mP*eV;YxM{00UA5f)>Hz+%jOHOR zA&^|L$oY!>0LM&a64Bd_J4Ns`6nEAl7+oC42t6J%mdB5CGs!jL+_D_n&1s}^2K8KMuv^3K>|tC#WFZ^N9z49%Av5FW1D^neBIqKaQTd&#(Ac^b_?#X=W$(*6)vRwCum)mkmB1Y=++@g@q9j7I+}uC`k)e4aCTDK6*WE^>7(?KEbCV$lFQ*gEGXdHcy^3? 
zNjsD`Zr{*oGtt zM?TEJl4e0eTfBCqQHz_z!L|o1OxkvAwWygBwyL(6+bAVUWGLHRQpXbG94lzHl?a0)r0U4<60`V<<3|yDM<1T}a2uL-s z$}oj0U(AQnpyPJA?&#%{@cGt71p$)G{Sos>qk^ z$xPQ}z>dUjL=4>^F>lS~j=!__4XB0BEA)}U@imZ%oP@Xk*;&?N)zkkM)$RuDGZ#+U zSP}8hqcckFxflPMk4c7el9L~jdNW!maNZhLY zTqBPPmoX|%jhOhVD!dOu?*T)>V;wn11BbA*1Jg$+oFgCx{(H9_=!1($55kxj@DKf6 zJ{FYgmp$-Ni^S`;>E{r%ZQyBd*xaw2e(d{M-Y1)!cTz)s-tX~|f37!wjJuXRw?hae zhOJbGbKQ*I&!0Zf$*NC#)-*Pf_+ONji(j;=?`!}_IASh;Q+qV6Xh%JS&tH@>UnY!i zv43b1F}o`1e?pW0d&T+xL6Z#ZZ1n$K0Ss2#wA~bi|7RY)QWC&8@w`n%^$|a07Dob+ zD9nR%Fd%2Psmvo5Q(YkB?jd>Cc z#x7?3PwI@3tp2*<@8hLnqPPrxTqp)9+z{T6 zFT@#WRFy|e8N|cGH!<6XJ{X(PC|6*ZkP$nHC%n3dCMWV|nggY!)G~;cwg-b6VfYs~ zLtG%oi#!1?WxkUPyrz0%8`Ba(A=rebFi2L0;Eijrm=CFi`ve~Tue=#^pZ+c$YB#*V z3(Az%P5cb+y2CjdH-7kd+_PzZTNyy3G^7%d>MdPEI?#}WP6xPg6Lt?|VD9U!;I zC@jktBBr=9!{2Bw$8$sD@-qOYB63q1mDe5UMR$6VB@tQSKFIFEykrwN6V0*=17m%* zgxLTRW_fyHD&UwO;1vT|DZZnx_`zt;PBb3VY-f|`v_cG^r#Y!S71y+1=EQ6LI{QKo z!6U2UyFRu;9HI`gwxA89JVXNmzBWNClfc=zCX|wD><)^UD!S$TlfjjNyimiuYWn4j zd&P*naBk$7e~zk2?LV6sl9PE9fBwV|)Zl1T@czZ(FNzSd=iUT{B2x!|IK-yWShOh8 zauUmGB;bHC;QF!iJ-fR2o16Jzi>d)hDRkj})`f?lk??onS$O5f72s@wkOP9mzF z=K$#&8{l`o+Lnj=4oYRc)u_^elqJ#)k~9z@)g$-Eb+=uD}dRyP!-yyb?8@JN98- z8a!D>bZoB*2~7(8B9HlnTmc~2UwA0E&IocWkE&!cKGKxoBB0MdC0+D$0Lr3Q?0j=Y z1@je$ScRi(x#ptTJG~GNa-;Tna@fF|a?!;wruEYk2s5oPq!K$y`BJ=nvquW#ZxYS% z`C~{%CCQLx<&^TI*cN~Em*?*|j5J0a)VQnz5|PY1*7GE``CCD*_y@^dP)LS~Mgp$e zlS3Sq4uB6S1nk-}mOxS6u=Nqzo|?*ym7PNaPcVS(rsmv39;`d&l6{mN%FdNtZU8sB zND@^H>@9+6ReMTq`VDj^bj)de9T%~1UYZzQ_$S;u{g4K8Xj5|)C>3-C&J|Vdz-rt9 zVLhJo%s1jR;HKcYu0t7MMhQQ&CL-N+zUgacPMokK4SjX?C(zyL)9c#C0HyGXQJgrN zZ7HoymYii@{oikwJ3rPv*#=E(J2A3xUHCp-evn==<*Y*>qO8#BU?Dfr?;8ow%YHMa z{k+cR_5~T=iS%NH6G{Z(>V+J7)@X(*ed}`sif@KI*an~Tew2Mbrnf8PFEL((tK1IW z9m&|Kw;&o?kk`Mmq6_)-u|XV-H>aUb&1g=f(#!?BQS4AFHIm|2ailLgPDc;sw{?20 z(K?S!r(LnUZPt)I94x~OXiprTpzF~-p9?#$8%C=Yux7O9HL6R^0p`hVrP%FmJFDG4 z96UtUEcvHqM7s~B;x>F2U`0O7K%n+6FKsugH)XrN+B|n{dWXN~&~L>vuk&AOmVCcH zD9rx&T<=L%o`a}}VI;(HtMpzDA5MRG{(B0#Zng8+P*j?)1AQZk6MWUgx-cKo(z2Kn zksKaIqzU2;^De-4FX>ZyVfYgIHNDmqE0nDu%AVl8R&dki9pY$hXx8bR z9&JfVZwSrt(OCEU-|iorz7p>LaFiMUpAtPR|5h`^?Efr@y6R$ajCfYQhCmux8z(iZ_=#Z zZnD5#G;N1-jL}O#N69hwSdv3&wtR)PoSk{WX#0lwEy*}9o=0}`LgT*Hms&mKxz2>a z#HwcH9cSSs;9#(nIC2bw6XbJ7=bf?0BGMU!X{;Hnz68JIT^d@;95hPhXd4X~AtN0kqt(U5WgY78Cm|i_-!$|{4|02JxEmt=c%nmN(flZ_Up7a_ zn&!%8*=EVW#f(`KSQINfAGO5Bm>z@8xW|lX6lD@AOs`m+GG%_;nWtW|BxK20XEu+4 zQNgXUnD?~ot)JhNZkDKDw^FirES^Tn{H>2^`uA1K8|;F`Lgz^uUt*eFm%ZX&37R&@ zbx>-DuF*AHdOu5aPIcz%W;)0|bCY$s+F+BNWxRFie(1CUK*jg^U#0?s+TeDxdW8#b5o#eYRU9}Nu{Tu)rAnwME z%s0K11eW#}@~*q6VTB6P;QfVPp?9I?6pWz{Z~k&hSvKnzcUIm5g?6^1|0jDQ(v$e2e00pJvA6lDurGYPLw@!5zdDp6 z_N^_M2NR>jX7Na*($dIrwQPh1L$E(81D@Moq^oVle71w<4fTX82Fv} z1e_+Unq)^7rmLJ{Ix7r_VIIaXS#d;#gSk2nTB z{4aI9<4K2VbU#^_l&Os_N4(9WX%087!etD)hl zpDn_BGmSx|46nv~C@I|=lohI`<9uN|@TXe)9k4GCfcaQKAwDw$@j0#x%`Iw$yj8O_xMk6DY2Hv1 zZ;$x)cI$^9`MVt=ROk0?$##ar0NJS>*#U)PXEZ8BQkV}_2xcb&1sYkM0kabr=+wUm z#xp+&)-|UDC?^{S)!)aka zBi>SR(`3*6qi+7T5PY^)&{jbw370DFdG&e1b*}76Xcm3g5@MaiOQPU#UOb!c>8o-~r@)~vA6grf*>x@pD8rj~zWnvbIl4k~2 zdHQKpxX1R*ctlQnsv~J=`B*T46GK!RXpq?XGsGJ~{i85&4)hq(Q+-C{~Z@?d;@U|>Mg2Mk`naUlVK!kP;mwkSM;ejygJe`^9=g1<3i=1|$+ zQQiLeA*rpfS{@wmV6z?BT2}BSE%nGLw9iBE>9f6gMf`ne-PHTt;_Za75d{Mc8BK2q zXnB#)YK~QVq?$Qivfgd+=xk&0)9QKBcv+#*ApI$BU01z}czEmE>}{i!m>7H4+Ak66 zZ~{f6${f15I3fv?ULV(pn2h;oj)|WGM~G=^4?$QfpjLSED?6u`cIR$&mWUnVqnobe zed%-OX~K!|A@g|7R%&~bE*SbX64`i~+aF<=M#`%Mab>%0%6vPmiKm--(YeB8Vf;Lo zyY7s-$%1$&DDZF>MnBEk!(b+V4MXF*skF3#OeZ~G%ZIWGp=+Er5fl-PI@FF z4^KrU%Od|g5Kj;G;`-_}^IWr4_!uf!I_#wwEL-Xf2v92lwB9o8FAT&HQK+XVUDnJ*e 
zwN^J8qJlL>4S&Ln7OgXvLLEDvOK)i3XFfd0VGGF{kNc(>by>S1PZ6d*xdVt#ULCOv zt#ABYH&{_-A`7ew-`tt8@a%x_(hQ2(>dJD zdmeMMJK*+*4Y*0e!&?wqc`Udk?2KEN2dhVI zSh+F`XxK(D$>$lN;=3uhKn9S~n1OxgVD@g| zgaOZ~IQ+mQVms7$HzC;0!f#^%iF3AowuD| zP)^CTU$S^AaM)0|e6CGrp3?dS=exI4GC53>&p0;XlImn{qZ8{V)Pw+Q%DgNsB>U)= zzHB_Y)`9gTer}d(Kdx}iYY9Kv40j&r%0(6vzKTG%Xt*F-Lox`t-_dn1purq)@9!s{3sAWXk=ZSgpM<|9qLKSGC8wV@nM zzP1VMpcl#H1F7b3hvyf7=oIm1N(7(9=c8dY>Aupf>~!~M7>COxjeO$DMLh%@4O2sE z7NX?zVTu!U%=Ov^YhT#^h_IO-`>Mn3VvkE1ZwF5;Vspsa@s6x!uB621#uM5bu1%s^ zM*agM7B`bD33kiMM5vyvj-Mv+)(}`ein|%T?}8Nn%HzdpiS9B-AZmKQSSWB3-Wud_ zS4MJ>Yw{88`rcyHW%h#Nja7@49Ib$QUEdHkj;2gR9$vzKOlDS7p8GfGFgWT#bmkE& z_pr?Nz9WISrlyB)HDOM7mXf9cr$)=|f(p@Afqk_SQN%uzcpu0!fv5oTuC&=(oa&;& z0~2iabi!GID5D$%>6l)9Q|S@?tHxV+%O)Q%d5FKJB5+U31x;l}?beh5o~FXLKJX(- zKfwDRsKqv&`$$CavXD zX6Y^3EGE9!!yUw3gqS=#W^Hqjg1d6dY#t0S4DJ4tfhC}F!tviVbcd0rzmr|k9K6yu z_gbKALx~46<02)6m5qdjF$~%(1^!k12-zob56t&2jWcqe&$5r@e+ci(joNAG<@YXm zA9696b+fR%|Jl!Xg1{W5#U}q=HygUBjD%7yYQg#9H|@dHqL`C;Q|gtn@>c->C$#_i z+TC(QP#|LW={^9*%CVfXkIzLNUS_l}dsUbUs3COtL1hbAC0_61-Ex{9$b(+#@^<~; z)IA4{N_{Yc`r0h*5Z)!HXwADAfEuV0b9%*~IjGaq%f#9F0AIa_vPj9Mmnij(1#I1e z27Go2JEzC;gzkgsb3sC1vqj12eeuQS5Zx7$sJvc}gEV7wo18^NwM_yty?lg|!{Yb@ zO@mx$Cb$-4%8b$~&lyXfunH2&h~Aznf#t=V7T)Hv`aLrFz#2;Yo|8ruB0->IHml7H z+M(GjA2M4|Z3YE4 zkXoP$L%mUXIlK;E_J!l)+~%5>Lzt7yMZJXXiE6pjeu%DN@VK~@p$D*PWhKg_musn2 zF@C(1i!^LPy88Q3AK=EYp%p8c=nhLoTk2!T(DV^af(wv zBNL#_IJ&K>6UUdmgK(>OQFKXqld0*o9$0d)gOH{2fOY*d)=h!bll=>dTM+`Z)}D_8 zmN^xYtag9hgnv!H#2BDOQEB!ojoBS3$Od{v@J9zf(a)kC5%y%jQphrhs&vOaPR)S) zEFI6H=4g?+!+=f{<>y+C?k${4Z}^Ums<1a*olT6a7e|PLIg1E6CiDC)OLD$ZFjV3M z+xdHPc~5lE#SkNcNW3-2JM&{J=S`3{sNLwKynD4rg_ z+a!-~UvT+Y0sE(gme(Zk8Bn2={H;+x7GO@F5!8;a>K;bEdU>(h=)?1I`6x-xwLsZq zgv7yFTnY$HKK;kU0{&F{QngYF1m;KxE*XKG@Eo5XAfXKGHki$5TO8Jb_v3L=i5)-i zwp9Fc;?4=TFatk+zE6j?O`soSCam#E@$Djx#`k?GL&2dnnny~6-5;Ep)AQbW?L+X7i1vddV1HiLi=Um0-Tw%7F@;yYHWvn66^psy-x8ox80=`R#H``PL~Bk z2dbX35jljU-`vGy11V_TeAXR^ij)Wg*YV%-_DFY!c(996GLAlK?a)xP>z}5FQ@YTp zsS^zi|AvUh7NQtl53r<-zlVL$KENX#xE_DJdM zR+a58JB|+3W=I&62$Pk&4#h(`%%KsMO@po6AP}^wOZUXNQQY=Uddtwu`i>14H{x`E z_Zct8!VYsas62y5he)FfsNiX;h>fw8U#MGS!+=V|bD_pX{}~?O8=WAbHZKdIy5d=L zfODfyT}xG!S}B7S1osT&>jHC{2Nrx+^^|tI8aIX|3xFDI9r&aVw;*9x;~g)cId;PF zH=03wO1-V82LNOIGRg{8!a&0i1UHG|5db2RdfsPM5Fo3-g zz;c?g9`cc5UU3+^J_KljX#= zXXv(Vyy0`BXu1Nz5ac;kZMZ;pW)n24VSkKb5m{*9oW1hD=1k|x^&ni{4p(aTx0!J} zkx2S(jfSQ_AfDmjUglCtZ11>6YlK@pcZ_uPRsw_i_jGEEzH+geYTWiZT>sN+czb znIs8`3@Mc2cktAs=iBe|JipiP^ZNb%dG5>0IcM*+)?Ry^wfEV3pY`5HiE)}?9VUu% zi{TRyZJB*~{(7=ENJz!`_Pa_YA!MT&?*mT${?j&+vtT_vX~e>~#I^C(h-UDS_HKUo zRL?}RP5&1!+ZAg!ZoYvqtA`or&#t8I31y`CuDa(fI9qGo=U=f!)p$i6g^Pyo7YXb8 z9B9VgxDEeiIT60?``|yqL*<~5pKFI!2Krkcj5EMeP#6>i_2)98(lKv$6xtO_0Xbrw zaRg4(ARIu$wy5MwuN!XLVhGuAASF}7v=$HzVlDC4lo4Xqz4dm^H zCy*7qm4#3kq9axTINwMH3xPICD6YyvY8x3KD+6PY29bmX$w<`jNyFt} zaxmmk5DWs52LoTKha#n*2n7gI0U`tX<0quT1e__6Fir}ln%aL92wW)(xlkzX3Sh98 zmzT5`T$)I721Dh6Pc%bdU>HmappYW_5GW{bDFXTMmXIHEG_hnf3Fl715ec9TxhO{> zm7**pv{BH{-&W1sw-rPnOK;Xv8clQq0~uhbG#va>GQg+=Bq|t_h!`pwOHxrolZcKe z3P=-2rr_{+ED0np4UvX`#4T|I4AF}$p|m07Px?PfR|0RSv`xj$`fo`8OZwlb4&0)j zHu^<%ZO5nee{An(wjVuEq z2Iu6XiK1XtU=S!w3L+x~g_}X33Q)KL40Z%Kfk3u|Y@_^(j7<%&zmWmB1Pp1jO@}R_ zzp(#s@ZSjdpGy4E7&dAD+qTD`f12K%O2ThCCk741;<0X60)-5iANs5LF=z!RBFPO! 
zQ9-%8<8f%zhI4NChZ6YDtZlUg5-B(e9{a;Nw%m5pMbz+=UsHeh4j#ABdKB;|g0r%a zw-g5JgredpLMnO$3Krj30s<_70vQ9@Sdb>x6N@JTO`Jdhoxo8*QXn&&8{i5jQEu*d zEJ*E79YN+~AYF%w!(g{O7mxe3^sUrimD;S-U-ef4>V)3%z`sb?aQu^4k6%;&tkqTp zHo6ygB5Bib|4EZy1#N2bms0IXnWGB8lkB%-Msw%5~Cp#qjw zfI2xLoa8VNDI{7>MoI>bLQ2WYKp;|RSqR1nhLw{=!H}D6<5%8o{<0-kk3goN2x#mN zxlULFQbxuJBPA>62$Pb5pfFO72rO0#iiJBmBAsMpF^H`u3&`EZ`yX4&mi2(H8hB2C ze?BPw$GzKf*#GkXKP&uS7Q9{DzlChK#^0X*Yp%aF(RPA=&9&Vce|!F~x&GEf+X?

    M($XG8hT^kYzu?SZYF8tD0W~YDg}=tY!J$B?1y6t zHbyyPfjI~k42)fWWNsz`ByiX!$ws^lSOXXkt|U+lp5= zD$mRyo&(BU`@D4CBf4A0kWXy%ap;8$s&tYbs(e|OwT@KY&$-_)GC8nF>t%y`1ipQ! z-r1S8^cXB4h|iuc;^9JS3b6LL{>gQ zxdv95cGXbc4yTNzkM?-1Kq14OpbQN))V1()Pp(CDyF8C+iY@7ypW+$L?HxQBnU=)y z0kz61tLpbGD<=f|Y#!L`0c5RyJnvcrb3}iBfz-;x==j5_N$kC#oQFfa*Y|^o=ay~V z$PUT-qJ3Wnp8s@jt#n1;;pjuk+E>c5>pdUju~T!>U0t7NH7)r9s(j28luaYs%=d(! zGMC)ZT>4^ccc)8vWs06=hpkA@fvS$FH`BeN%I~LutwKk3Neoh2+Nm~uBskYmy6fJJYz?}MQwfFVF1zVeT}JN-pMFxYtDWWIHG-#_`d;-9+9Ky7 z{Td=g=?f%*G4!x8tDTPrBF5=a31#g*4J@bP+CIL#A+;S>mgcJ&Uq>Ig)7>JgT*J*K zT;2eOzm`nnexZ8vl6k_8YEM1yp&jg$CqlKT-c!MJ-*46y_+a!zC8quJ3U(b2ysBxr z1F_0qqI2pziWndt08#uUf4_89HU@MBt4Sb`$_ck zbE^~4qz>Wy;S*5IB3qsfck9KwITl@AOjoDX4dN%lVS81gVK=Ur+~H*lht0eVJ8FDK=#dUXEbTniAX&8_ zdgN;J%QKmyKH-azYxE~6wONtF4F)&Q+Bz?41u-0;u^yLI{cW?B2b$GJ6H-zFYvb?~ zED5ZQ2bKynvFMG4_{Xx9EMnV6RJu`kGw3CZAYGaaj0dZ$%XVWLu8B19d?D^Orv@ek-84eXFJ?hB+ZVlsfH<}hDYHmb;` zf#%43^Aq@T>{7#!Lr{I4eUN?7c}^jThc)fh(SAh++F!ouwXlUIRQsWxwC{0yyZ;*n zGifJTjCrlyd&gxmepQw|x5pO$$$&HMJj%w|i7#MZv0&Q^IJwlITrbIJukz5${t6BT zcp(@&%Ob{v;yjW`J$}{lZdu{k8FT>qLE^g}F3+}#{n56s`*`F&rpmlL|3sU`T*&Pa zl)Beruj9>g&s(O=(Qoy7>3k8XvZOp-5^IanQTitA!1EZk=OMfMMHCA2;yJZ@5d4Mv z^ZD3%Tp(|oZoWS$-SbZNl|ESifpnMn$B+8uA7~;u)M3Nc90tZsn)GgxnB4J~til(g z)Krr5C2T;lyJehuheZL@> zl{H31&+gZkYhS6a+@ya#ahH{^x#=)HNL~nZT8G|S_f#nZ$hxN}*knk_j}hBqhoB^6 zE`JMYpZ8;7McknoAPsJvnM+i8;2EF!@1akdiRvK zBvS6|(Ht3n&{f&%SI!_W5NQD`;lUQRx@N8VF9})lv6*U^(1)F2>sk+3qI*hTs>pC| zBJh}Bx7UjUU%Zb7@;v*(_(|$0XPZ~sMXe@=Fy`pz$H@1^?^=4|eZ?x?XsR?Q@s?BG zgN}@60$a0l#R@0<&aY)Q9zB2W*K)u3wV9dPMPWh4n!Y37RCz+_zQzP-=-_9vCvUCS z<3%rZmEREJ3enFZ;HVVvU2`{6g6g84crV92y+)^T>H_HALRQ*%K(WcJN#BdJC!N(7 zS_1CPJH#Cd(0qz+_`sBHo3# zXPV~yZ4z@(MyxQ$8ANsMzpV7DiFK6H{0G~AV}z9e;p(zPS?=H_M~ch z)?4#zwAo0`Wr1L?T+h6~S>K*p2!`?WK#39 zUt4gpq_5<>Z5863S^hk!&R<3xpH3Yb3&;vxXd^)cYJEFA-zm5^%G63%Wse5P-N`+e zK<#&ixjC8m9)_n8XFGA8>ax@aG3{`WqjAOxONFI#zRu z?f;O7|oN?>>bf`LCqYL5oN4$W8VE zcw7kvCOm^r)j_+B&b&T zsYfe=yM6T~Lr2DTIz&DTV`!Wv_@@`58p2mAgMIfk?Pkz1N-Da}E^#jVw2Ogp*@X<& z#g^U-SzmTRbFs>>uWxzT2Hy)~&fn3mlUN7(@T>fXxqj>G|7bTs+3nvU(2Y7}cA`XB zp7G_Rgnx)L5AUM0m?`j)<{MJ>+p zYCRuix=JhTddNBeF^US9KOENpJ#N$2<%WuZO-_V z5zzfW1k(QX>D2-Tzk>~)@^NvB<$D-u&q~i%Ca@t3lEy;X7QgHCZn)F)&!-a$qbig| zS*_vphlr(fU)KUy-X4Z7@I9H_H@W}T7xfof_j8dnt&<;PCuJ|Y`6{2^{iJetfC$IP z4)r5)?DxcuY7Hp8gISe^#}S2ij=y!0?zH%-&Ngap2BD4i=+KN|DO^>XM>HkH_pGvF zJ2Z<`MfC=+6_w7A6FMB?%XT?tx8^B#D8Dz&H9eSBXqJ&IMW^1qKpXvX@&ls_*Z6nU zh)QLzEWe3Nnsd)oC@@S95or4x?hT7^Zg0~Z;I$o|8G{+VaW}cw!_MY8; z(A;C_EqbB&#KbmEM;RK3th7Q0vPPdX|FEKxg5Dp>sXYUM43M84?z z7!Ou&sXk|SQSnj9KDA5t8Lf<4vUXr&Bj%=aqDN9!BIHsPFaa872M0@vIW^9!oxTC; zU<~FpN9@vh9^cO;7AY?Dp>;fXZJ4WM`NhFKen-z~H7=S9>I7BWHh%jQ1K($SyMr5X z<+vT6yYR`lvpU9e0^&9$gx%xTmrA6n*_>ZaGA}9=$kn)7jfq-o2}=yikFR*t<%x-P zcF1<~rhjJKtys`&BqFL;kG&>oWKrQXf5|FDvdb|&&Mn*N)ad#0^>rY4#9rNHsDb9& z(d?y-{WFUy5A*)k`~3aZP4XX(OFm8S$B`GnOqz-ne+_&-n%6G12uqpNR6{`#i=R7sL%Icc^(^j z@T&K5IrL0%PhG`%zF<{`sKEf|C#mAd`|pJ=-j2LuEo9qfXZKvx!kAVe_re<*dP~2( zq(g|$9K*xyj-9GgUHhwt^sjdUTh-n+IrdepnV@z&x5zfJ&d^h)fr9wZ1@~0-Q%@48 z=%{&B&=luVvAm1eG(>Knnq569e9t^tD7f^5(f#V(O@i5PSsbpK7m9VNh+BW&lb2N7 zU1cKKQmxdvTS2i?_t}8L^QdPpc@>j}FzXNCIxtmchY;|&{cj@%pSq?x^W5&sTRzGe z#b+}dsbW(n5GfhZ&UUt?FcR#m>$K*4iRnR_&%2NGPcL5j;43(?|EQAtt4uZ`WY~R) zMNnquik%dPYNiOIhN0X;%xj*2lwnJ`7}g82Hp(Bx+U71HN4f}S0(EM3#qfx9KJq`* zrBdhW64%Qbb2O&&0q@Os^iSYvx>lEb*Hx|4?p>K!)OGdKr%_%Giu>FMDqB*B^Xf{| z3|%r$ev+t0jzIDVxF^rv$Y3};I@*)Uuz3B(O;6(&rydTWcbn$7%A4cWo%oT}YwK)p zsNd@X?Sj%}+Vw6=d^#sXpbZKc@Y)F}9ZJ zUt`I1AT(8ce4%x(uqw>2-AiN4>)>8lrt7{V?_Mvpp2wmY`kyAyiE}pl&UzG1p-<_h z)Lh`4sj^70GveD>(dj>X?m;GpYihPm&;=UJHHLJ@uwuguJ{DMx 
z=Rqhk>_`}T0Av}y9AhGux|D~2s?7^Dw%@{kwpXq%QNOjoE#9#Pe6X-c@$l{6dj7VT z%Ku@&{%dD%mh?2156Y}LcPM<{0kwTWjtAmUv=1W&!WQeRqTllmS*aGub>GmvH2C#k zx%J5Cfrx|JBL;qvSEu?u+XSFZKdnVfoPP6lfTrXuH0;(R!V82bTblCAq4(9rwZ4Om zFQH2!t5*Z<0&cv7+{Zfb!=ce0bF#$=DmOlYF1)sHL8goiR&q}qp)0CL|LD0p_ntft zw0^Q*`I<$7dorXl0z;19rFE38P3p_)OXqmh+{|GP_JQ=p>bX_FC+s}=B9fxl3~em$ zD#q7YnNRe&GphT!@P1yVe!pjt`OqdyWU%g7QvX^VZkaE{R*vGdtmRhtY9b%#x_>no{u>a~eML-R@mzu+mhUrCHl}z}V8>>T;&&??Vbk&Kif? z3w&wN8?P$7;8VB$^lQNPn!r=_L&cXHt}=nMCuc8&O$7GMjr6Iikijt;N8|=eYUb5n zi14m5wwi>Z$masIVm*uHJI-6~-jxc~_UJ&09gfn~Tgn#+`y8764!c74oqor1h<-J< zXMgjaJ-699s^A>6IK4PSE>%Kn z=jm6oqNeL0!W7A{u(IlF@C&iU3`Yx*P^oFA=KZ9kqezUBZ@u)r zmDv63@6NplzDd-)Hu~Yo@z~O5^bKOL~StQ<7-iHAV={n61du-@5!i+IdS3^2=BrpL8PbC4^aXStVTBL7h|L zxPo?jhwcTf=R(!%Lh)sOfs8xfc?{$|mgJ_gM#(-+lC8|pa5{vI)bR`1TUw+P$Ez@U zQ|VaNSJp$fqxFiDL6-oe@n=~9|Ysk_0HbY~-zcLgWr(#xehj?0~4bFu9xPL^#>%s||JA0E_I zGQX;|-&@5?fga@FQRF;XUU6IzbRg@#B(tpE8QbG0B+&bA>RDi}BCrMdkw?h~Z`&jY zj4FR&N0#$2+%<_o@7KZzkB^e|sOHAYW7tdRN|`!bZGc za2FBbrF^yR>7$izp`PTM%p7^l`BRvTo97?pSqy&}X*)2V?Qmyuq4KqVY08*f&0Hqr zeD=Mf@e((QvIi$ei%u8%7&@7tb=~C0`Bg8q)TY!Qi~3T-cXA>UcE%xSz57CILkeQJ z*Z7uQ9=N0QVyO|XGa$ktZ{g63sY16Q(5-c^cl|X%3_;=gvD4$>4i}~+y)R81*nhQR zb-^RkE4y#1;BnB6TUA9DJdBN?pA@J0GOdOzcVE?9J@MdjIyIz)-INRmM?O+mwz8_I zksX|ltj%$a@elt)#C@k`<|&>VC*wZ zoP7PApV9AxE8?(0?Tc~TKugAD;m$1P^9cAG%wctQoteG8#aDtuo2^Os!-0LG7se0l zICY8oWU?xD`K)hN>I?tAa_(d0nZ0&JvG*+FldNT~N?WnEnnLcmmqw5v;wV@0#n+RU zC6}oEEumkY2E!@%nj@dD`0O4KtJmRkZDslTlH-%(AuLm0VKn}tM85VKb3BFX;LX#o zbMH-(doH>Qkga8C_Op_4h0H@2YzzBQ`Fm0eIx2dc1x;x>)~vmcu5nC=W-ehK@;CCF z&M-RgE%Fo2?xA2tL-Bh-E}9dbA#rJ}iFOSI%12Y;B9LiTrxHz-`U5V7iA^qq%-}Qh zUfySu^L|y-7klr_!CeXIv9Qo4p%0wlO}kflWo(U}Wt9-FqGsW}ebFqk`xjY-zA9*I zJfAkX-Ah-9NW!lbHV=99Ej^t`x!6F>czHCTzbQ@rc4}Mk;+5w54BvsX-7EY?_f=U^ zBfs(K7HHfp8je<3D!P4bXvt{k7(T-DW#=kEhYM?QH1&iF&y%UMY<1OAwN95Mciown z8SYT&ywP*(gYCUngjGWAAe9a6ROQ;8yYId3voZ&t1(N-k>vc)Ye0L$Z)TIe2$dj*W z|B>C|SY}d~fS6yc7;NU-l<5hl41a|aX|BoWy# Me*v59=jJ;3jd7jf1M7xuQOHuN}Z=HPO6~P~4dmuao zOCkX4kzgS51&79(>Zt#RWl9)AUJh&sg8vyu=khA!l9!oTA9uv_S}))1XOI^>zKPP1 z;7`(^LkuPdoOT{~Li3Q>03VsJs>#kyt92nOm^L?Cw>Z*KP(0s(O?R)cBP2RMAEVuS z+~3D%ZhU;XrFLY{d5*k1;Fnn`O(Se1APwGei4K$|_Ij5uvdc1Dr{nf_nw{RkwAX1M z)>8#_YO@big~4+BeeXTq{q6iKW?J2Eb=Y@PMmNINjb0R+)zQ#AKHW5dH&i&$^hL<< z31?pP2e{DnN5auO+EHoXXjJFrIW1OuEYRJPJ`qni8i%&%Gv(84wrznl z3U}y0Uh8b>6M{6tS^=*03(Y<*Z+xQmGn%@^#KPh5*ebqD2~}D3(?JT~SOON54=;Gu zr$o#?iNZ1^GEh8*tt9j$)K!#~lUv_(u&gUJtt0147p)FHjvAkrS2EGecX?7(gEYDC zgD(kAZ_Fk8tQSPBLuQO@GF%hHKWEY)o-pbTXVJZ#R0+=6OS{j<_~qx3)rl~+9dFY2 zUst$|T+ZC_eYM0rY9*mPc9m~(-vNE+-CualyS{PV-4RS9Ow09{o-2!%5+Vqpw`kx- z(#hSTTM2R4%i2rF9l5JDm<^^T-OtRV%HF?YDfGB8{d={;F0^8*yO1o>!B1Z+RPCM( z-sQj$6E^9vvxatYF9S*trWx{9&|G&9EVvD{6T?=mreX}@WYyB}*vqz0y}E-tpHVVg zGTSDf{VTV^4!htIb@~ov7tYI}f#I(nGU|sG+=3Oe@IGYj-@SA$Y!uX z6nTvy&Sq5@>63nuO1 zQM;btCC(fjnv=_Ha!1_Aj4?X=xQ3f5SFWUqj>(67{%)ym@iRL=?>5jrlFiZ~>jM5H zN#RrAunU&fk;@wDNa^tE7<(sofcf0H&IeW~CFp1>Lz{v=bb{tO7nvU(%=56#@wSyu zF>NpFbOhFMM?c+6;0$y|eg@>px5};4RPd(FeYT2`k1ZwQNDE@L{mo7fjw+uTPWwH? 
zu;qv43j-_r{FUqW1koRh%M{b)pfGx#bx@~6!Lvg<)T=bTb-lHO_w_{eHqi*BR7F{- zDH+zN^NG|QZoTkHPdZQc;)RMt%2~=)O8lp2eNorML#^x>S|73M1pkC@soyL@g;2ty zA`dS0i<^jaN<+mL)c1i4M5ctREnqrjr#|MrWogAp)FjYP8b)f1YY_8HuqasUJLt7n1eZO?ih^4%^jJOoTIK%RG4ZE_EFv4ZRm036C7=8_~ z$z1R&g8-`$^<(z+$Jfu%;vH}e2&4n#7H4g@uLKb{`~szMF*^};4plyy&;IOji}>^W zhws8~U-vk>Kp3+hGZN(%)f4?J%BGLiw@jT#^GsVibDj7IRs_56W+>|=H{$XNw?;~P z0BbJ`dpc`leAHj+jy1*BzoMe-t?eBvLu;IkmaVxh3p}4WNE=KZM6yTLipG43}OQ%n7J!^-Wo0|Ad7 zHlnYz_!w2(xI0OBjsj!Bmn?Jp+L(&CrY%2tC)R}39I4G*dAB+raL*q#!d2(zFHq8L z(`q{~y6aobx1|;3k9`Y9i|Z;h9Ipk8x#nd$WFT_4cl6783-5?)b@Bt$~; z6wP2VunH(ex(!h*gU2K<6n-u$i8ykW{7~`mE%t177WVR@VT(CCWqusH^~;J zcVIg1eW1HXPxFv6t@8Jj8n9jUOyC#C(vx%5M?5VJPghp^zCSo-(?WeZ{ULxm!0njT zw~k)oz)0Z3_u7*N)9H5fPj|JmDlpqujr&f2bSoX|n=7t?LpWg+FC@wPt@P^b+uA%o z@SO9@%%EFSf?rXihYNJZT+l{w3 z#yJW(ViTPbZ`^o!gP>yj9skYe{o zmxjXSW#IA~$PEB#0{lVXQc#!z5T*|VbpJ#Q!8V zdmG0;is|1*4+QW3KK2{P*n*C15dVstP!`&-=LU2FB4-AXSAZiF5F6(JbYc^ovO)bP zM&h4P{|K4dfI0jF`dWSPBJ4a3-H}qqt*HBr@@ofe+{>$tm0hcH%VF%@GS22y+^U79&cNJ)M1jMIdZl z{lGx{ihkJ2`xWoAmG=K9hWdzUkm(efxpDm zi+ODf@R^u?H_zA%p#NjBTT@LOtc%5a0?XfKPQX8BkOWxM6bQ`KA^^d-z+eYB1;$mt zAm>K{5P-iKPp_f^)+Q1tK>j8_Om5?isVUe7rRfNeY`iowRRMm$(m587A=}z*i=5vS z@IwKdlM|MN1%QZTTbK;kkp!Sk!Dti-K)j-HBmny8gvWY=F+>UqjRsJVU>7RE8AYPH z;ZZyz;t8X9SVPi~pY8iNCjxKRh2tpvbecr2NW1LL9 z0D{Uwz*dJ*P-HZ9 z1K0uZAU85Ss3_7#U^#H=g2Fp(mGDO*83dJwfz>u01FW`Hn%XvJQ`>YUu-eb6t8F@% znig2&NAX%)%vxK_THBblesbw;(dcc_=xw9X`$?lkaRD1{6=$?XZnQ;iw2j>84}zau zZU9sp=Z^OQ8*SZO^DS2MEmrewtmZ#?thX}FT!{QnxWue{%S2Wddy!FoMsIWV@{$F@AJ# z+d3-wZ6SYK=mcXF$&C#3c^X7K)r~*~$$*UkC&!_ItrCDfX0yBdAAkluO_Wx8*n3h~M2-eJGozJRS1sg|&;S zvnRNB(el{EgPhMxJAo2g<}M@(?jHidOIdWU%{nS#M(0^f`N^GOoMC&*7Ii`219F~v zA9svrUyrwpV=Pl%ABVqU`s~=_szSe99nGbC>Kgqq-fm6dO5!;^diS_Mm)zWhS*!JL zf!^!jXS}V?d}?;q-r4C`bbXF?&LY|I!JC^FekXj>dE}&H?=3i<)f?w!i>@yKH+ECX z9EQ71g91BVXnN6BDP$=ddC%$#^mHLUfa>wzlezeztT8oLSdQFcUwKj`EA0qz?kT=l uxrZW|%<^@w15L!Rjs0()rT~bZMDZqJomiL3;#Oh5}sx literal 0 HcmV?d00001 diff --git a/examples/timer/Makefile b/examples/timer/Makefile new file mode 100644 index 0000000000..f4db575b8c --- /dev/null +++ b/examples/timer/Makefile @@ -0,0 +1,58 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = timer + +# all source are stored in SRCS-y +SRCS-y := main.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# workaround for a gcc bug with noreturn attribute +# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603 +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_main.o += -Wno-return-type +endif + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/timer/main.c b/examples/timer/main.c new file mode 100644 index 0000000000..7ecfad1086 --- /dev/null +++ b/examples/timer/main.c @@ -0,0 +1,156 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
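[Editorial note, not part of the original patch: the per-object -Wno-return-type override above exists because main.c, added next, declares its per-lcore loop as a noreturn function that nevertheless has an int return type; under $(WERROR_FLAGS) the spurious return-type warning described in the linked gcc bug report would otherwise break the build. A minimal sketch of the construct that the workaround covers, with a hypothetical function name, purely for illustration:]

/* A never-returning worker declared with an int return type, mirroring
 * lcore_mainloop() in main.c below; the gcc bug referenced in the Makefile
 * can make a -Wreturn-type warning fire on such a function even though it
 * cannot return, hence the per-object -Wno-return-type flag. */
static __attribute__((noreturn)) int
busy_loop(__attribute__((unused)) void *arg)
{
        while (1)
                ;       /* spin forever; intentionally no return statement */
}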
diff --git a/examples/timer/main.c b/examples/timer/main.c
new file mode 100644
index 0000000000..7ecfad1086
--- /dev/null
+++ b/examples/timer/main.c
@@ -0,0 +1,156 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * version: DPDK.L.1.2.3-3
+ */
+
+/* include targets were lost in extraction; the header names below are
+ * reconstructed from the APIs used in this file */
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <rte_timer.h>
+#include <rte_debug.h>
+
+#include "main.h"
+
+#define TIMER_RESOLUTION_CYCLES 20000000ULL /* around 10ms at 2 GHz */
+
+static struct rte_timer timer0;
+static struct rte_timer timer1;
+
+/* timer0 callback */
+static void
+timer0_cb(__attribute__((unused)) struct rte_timer *tim,
+          __attribute__((unused)) void *arg)
+{
+        static unsigned counter = 0;
+        unsigned lcore_id = rte_lcore_id();
+
+        printf("%s() on lcore %u\n", __func__, lcore_id);
+
+        /* this timer is automatically reloaded until we decide to
+         * stop it, when counter reaches 20. */
+        if ((counter ++) == 20)
+                rte_timer_stop(tim);
+}
+
+/* timer1 callback */
+static void
+timer1_cb(__attribute__((unused)) struct rte_timer *tim,
+          __attribute__((unused)) void *arg)
+{
+        unsigned lcore_id = rte_lcore_id();
+        uint64_t hz;
+
+        printf("%s() on lcore %u\n", __func__, lcore_id);
+
+        /* reload it on another lcore */
+        hz = rte_get_hpet_hz();
+        lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
+        rte_timer_reset(tim, hz/3, SINGLE, lcore_id, timer1_cb, NULL);
+}
+
+static __attribute__((noreturn)) int
+lcore_mainloop(__attribute__((unused)) void *arg)
+{
+        uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
+        unsigned lcore_id;
+
+        lcore_id = rte_lcore_id();
+        printf("Starting mainloop on core %u\n", lcore_id);
+
+        while (1) {
+                /*
+                 * Call the timer handler on each core: as we don't
+                 * need a very precise timer, only call
+                 * rte_timer_manage() every ~10ms (at 2 GHz). In a real
+                 * application, this will enhance performance, as
+                 * reading the HPET timer is not efficient.
+                 */
+                cur_tsc = rte_rdtsc();
+                diff_tsc = cur_tsc - prev_tsc;
+                if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
+                        rte_timer_manage();
+                        prev_tsc = cur_tsc;
+                }
+        }
+}
+
+int
+MAIN(int argc, char **argv)
+{
+        int ret;
+        uint64_t hz;
+        unsigned lcore_id;
+
+        /* init EAL */
+        ret = rte_eal_init(argc, argv);
+        if (ret < 0)
+                rte_panic("Cannot init EAL\n");
+
+        /* init RTE timer library */
+        rte_timer_subsystem_init();
+
+        /* init timer structures */
+        rte_timer_init(&timer0);
+        rte_timer_init(&timer1);
+
+        /* load timer0, every second, on master lcore, reloaded automatically */
+        hz = rte_get_hpet_hz();
+        lcore_id = rte_lcore_id();
+        rte_timer_reset(&timer0, hz, PERIODICAL, lcore_id, timer0_cb, NULL);
+
+        /* load timer1, every second/3, on next lcore, reloaded manually */
+        lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
+        rte_timer_reset(&timer1, hz/3, SINGLE, lcore_id, timer1_cb, NULL);
+
+        /* call lcore_mainloop() on every slave lcore */
+        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+                rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);
+        }
+
+        /* call it on master lcore too */
+        (void) lcore_mainloop(NULL);
+
+        return 0;
+}
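[Editorial note, not part of the original patch: condensed from main.c above, this is the minimal call sequence an application needs to drive librte_timer — initialize the subsystem once, initialize and arm each timer, then poll rte_timer_manage() from the lcores that own timers. Only APIs already used in the example are assumed; the names tick, on_tick, start_tick and poll_timers_forever are hypothetical.]

#include <stdint.h>

#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_timer.h>

static struct rte_timer tick;

/* runs from rte_timer_manage() on the lcore the timer was armed for */
static void
on_tick(__attribute__((unused)) struct rte_timer *tim,
        __attribute__((unused)) void *arg)
{
        /* periodic work goes here */
}

/* call once after rte_eal_init() has succeeded */
static void
start_tick(void)
{
        uint64_t hz = rte_get_hpet_hz();   /* timer ticks per second */

        rte_timer_subsystem_init();        /* once per application */
        rte_timer_init(&tick);
        /* fire every 100 ms on the current lcore, rearmed automatically */
        rte_timer_reset(&tick, hz / 10, PERIODICAL, rte_lcore_id(),
                        on_tick, NULL);
}

/* each lcore that owns timers must call rte_timer_manage() regularly */
static void
poll_timers_forever(void)
{
        while (1)
                rte_timer_manage();
}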
diff --git a/examples/timer/main.h b/examples/timer/main.h
new file mode 100644
index 0000000000..6027cb5455
--- /dev/null
+++ b/examples/timer/main.h
@@ -0,0 +1,47 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in
+ *   the documentation and/or other materials provided with the
+ *   distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ *   contributors may be used to endorse or promote products derived
+ *   from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/vmdq_dcb/482255_VMDQ_DCB_L2Fwd_Sample_App_Guide_Rev1.1.pdf b/examples/vmdq_dcb/482255_VMDQ_DCB_L2Fwd_Sample_App_Guide_Rev1.1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..ab4b67099d5d029c43188dee749eab0bee68a42d
GIT binary patch
literal 71737
[base85-encoded binary data for the embedded PDF omitted]
z5PM4=FxYoNqO%2kcvrasR9hdLezu>eT*>R${#n5!S%u;%*KmJZ=p( zLmbM&uTR@W#CN~fcoN}Pv}Zhjwp1+iE7TfvmMvY3M-8T0&JSH&`P)Bi7a7rg85&6$6*~z>DcVqqiDXvC5eOhyeq-TC$@NrzX3hC5^k1^p zC0nv$bAH<{@$U+Wu-P?Zd%RX-__1fy z>L6YDVqsr&rO6)=4KPUcDrKTUgh|UvYW?C!t$B$0TUerFRb%+u=E#>Q(iB)BnwwVh ziFM7FQujbmd0-u5s*foYMckxVkS!t^&VJZMlKqUAA5G1l++ACCtm1qDFw)*OB*9|- z;VUw-xW_|&pl_{SdlhcTKE3R16@jzPShAkAx9m+b9@VKFt*KttUsmG8l>^8VpV?7H z!I!vVg?$l^)rJnCvph;nvGcY!f4tX!Oc9pLsUn`S&-|*BA_5D z%2U9)b?c1SX7%EZk@*gVK|_NciDvpUSJ;SQh7R3W&KLM2C+&eeKatZx7Vs#hR8~Df zX_0@IT5eETr^IEnv|LDu{A~Y0x?MBatxA2K#{2H%rBGGzPBK}9^N}I_AO<3`n!^8q zfYJoBxHKcUIMff4FXR2AugfefJ?!C_0jU9KTNoU#6INb4Zc8~tluV8f{cgOawVbmj z7EC$>61z_IBrryWz`8A?Y$@8i7BK5Srd9Ox9$<(Qz+ha|qbqWEAYolsR|{O5VFbp&_Ca z%u8-_r%tV=2P_m5f+UJKx6&vx#>e99o5UYNl|PhJz-L@9LO3WuRGFGN$8{T-^X|Ry zb!Mz`Fmo~NkKl07VzS5>oS1wL>XO;EHaKns(TV~ftNHzHwO?R+j5O%R>%J%mz z0>jW+s=jJL!jMA7^g(jywj@!fPSVoxUYux^cPMlY`d}mQX;!ir8G@q?u|2x>@bnOY zHBir74acWZAznl!$vDUiliY~DIY{g}CCUx%1qm0DB+s5|FvKU7+@5TMAmVX0A&{4l zA^g&r_eusqbMoz7?Sv7L8_-xDRb4zH2bi*>MXV|Ur6Q+9<~u~S?UOpugXF4B|D2aytiN2oxQC2$}>rR$vS^I0zQ_HKvH^X4$$yMnU= zHpj6zzEpC}(An@_tbsHMZY3I$P4U9vmyp$J8E7W%_ID8|!yn9gr8qR(CBM{{HYixc zM`a00>4g_pMf2Rx4CcA7yUgoPkFg6EzLT1TYrD1q*;7CmkD9lZUo2zvqBWo4x}4)| zVmxpg>WJYl%%5QzYtThy7niDz-+yI+Diew=Of)rZE-SwCw;}Xxyk5ujFYR}5$x+l4 z^<5vbZeAK^Cy~POcpw@@a~xX(PG}`%c>6Kv=E60EJpE$3;0i_I7+DPundqIAQg0ZA@4xvSV?w zzs5yY$ExpBp6dN`p|Sjumzm4#ynTn-mUZP2M8PM&1L9bVLap|jv*+t5oX$u725zrw zj|-P@YA^eBraWM}GEEVm(=g?f4AHzo8N0p;Ax2JpY+&Nw=q`F ztr+Asli@s>y@r1Aro7pe>qcS%umk4#O75Btn1*fy_*=16ZyRWOd{VKEDWdfX9f6Z5z zd1Pqanin&fcs_=@?7o1kT1imUThLAOZz;j*E#rM7K=<^bo+(fE9p zzyivPeY5ECcw5_lVLAD7u=W9xiYK$+CH@BtVg=q#YgddnM zD!0X>h9#ZKbeFh3JwdsmVnS-Du8CjPkYv<=%|=2OxHmh9l*A`#ISnf4>;AaQsmA`P zIu2u>)1_5mb3(L@NtD_P3aEq=Xhns(gX#zsQtQA* zc=rmzbXV*`YN}DY*TpmGPlHu4*vBe#MvFzv$9}Z2r0S{tkms$k^VN=sV#+=@^Mz|D zA70AIp3-hjTl;HIZ)n-$9Lr!fN$t?Z9x5D;$=FrMYcw8u5^*RKSnJKh5K|0}KYg_xU<^64WdM5VQC{E17W1^i_vm^m$ z7Gi*e8{ZQ_yXmZ8!fv3Rgee8CM0uuq5U7RIBp|pio6o z+3sgcO#`$YeaX=Eyl$KCTN`mEX%SbvyIdO#g+3SWUh?BM=#nrGk?U~lKEYb_{M^nD9K(a&zboagH0!hIoztyH{Awks3^8+e{4HWboi7RO|UV2eH4*5E8;n z3DxrC)owML1A4jQ+M--&@+O5Sj7{HNd$+(Wp3-YJ-+{GjToGp1TKNJqQ{7VKqrhOv z>&8tLD$KU3_DE|H?o6M~$IrBGfZ6GxSW(jS73e2Ys6B{G%z7Gk7GWGrUZ*unHp#^h z-jqs5~l-q8u-JwwwQEL{Hm~Ji1HNAX?=PrJiafh4d1+qpJ(BlNW zNiJ_*6}Yw71vf2=15m=Ag1%E4-VTowC*SvGd1qh!k4w=Si!8>VSt7l=drVU- za+}qz1DAIgsHU-Au`)2E7trIzO;|c*dS|X+I1Z%q=kdsot-0Wi8P2qs8weOb0;7GL z9A7>iCsTTkz~N=0yYH4OGeWq>Ua$K5n*wznU;+?9hN=W_qcL6W`$I_V zIVt5yttf*QP>ud~b4D}(d7Su@MUP+U)Et|3TVFMXg3m!LbVT<+!k^q7IG-v3fB+^q zOdbB!9Lj%hqZkAIKddPTR2z?79YE;VrFf18ie?l|{=mqxw6G#`-ifv3~Q#T?aQ z_|lCsJkJ|z<2K;A4g^n3#8nWx^yB#W{n^Dh|G3)y?hY;$H!2}&ganeKbRv3S6J1Hh zaY+eo{zYOoku(N9qpZ=y&*wd5pfoS}#|bsaxW)1iB&v2h@8_IWAd6KViIIVAjM60q z2IoDI5IyuZkJ-tkn2?Vg0azGk@gCD1m->=&x${UWfB67hJt@_04x_tlc|RMNrG`;P zaixYuJWMFFF52=Q3r09iG^>Cc4mcl0*zGGpNnn;hgNK1@037N``{&@f`hGzpflEx? 
zsXLse+%pM7b&;VB_!SWH)PzPj98visFLCRJ#5$2BAdO8y+C^fLPAamEBcZVAkILnM(l zKaZgVc>(pdummwo`#gnKQOkdv%Eldedv)}AOVcGYS8@a&RMJarP7s)5%rHCx1fcz~ ztHWFgKtE)u_w(%5Z^=8*PBm{Ux`JEL{YMhmm1iJc4QSYNE|mMOPhY;bA$edLbD&?= zB2i+v7%>P8Rq22;2BKFVqetA+Oth+B( zmm-^!u|-QcbfxU__n{B!8Nuf5ZC6R|urDrPuI(TWmV{%1vce4W?+{e0@B1l<9Y#(w z?fz$cl5^5${9ilst&OyEFT z$v~*1+5q58z7TpVM~2qO^GTvMFU&+$UzJr>Xj6JZ#w>vpK?2HyuDNG1je0jGslzBa z8LXOo--A_I>8MF*iDm&TqDiF&_n?A8z%Y`>#4S*r<%f8B-CEu*Ta7|WOrGfbTv_hz z0djoum>cbjK?A8LnLns%RoK&|HSq{x)x;&|p3u~{G33xuC*o{#$XJ39-Uv4`-~bnT z%?#)XmO}&7LBr0&OpFEkum_M5aM%f!!JfXP-jEG76RH+7AsdGsK!r2G4gjYY)AK$# zC)6bK3&LeLv!{rPJ3uo0qEAk)k#{e>u^jAUOlZMxKu}CU^@OxVGfM{vI-tRH;;cmH zsr1|eb#hmSu*)%MTAEA;c4lWv98Bsbrz&dHWDYcRn+&CW$M+_k6ztAwL}f&FGag$m z=uw}5WOcx92FsqeJLE7hii#_BM*q@JHsx6&5YrNR&QXN~Q=qAQ79EXgQlF%RI$#$n z8({+09%E^bqNZsBAC6yg$WWj^I7nuC%y2m%6{Jc|R|+-0F-R-J!DFIj?mK8LP(uZM z9=;UrR@v-s8=2&YrV*R9<|Yyx#5$n;6~iVe0Ufnv&BM5)&pSvasm`6tt{-wI+Tk%z7H=Zb!$9%2+puqNXX(0iP@bH;g`*zSAJ4Txe3XEDUTO zW{BOE7qVTC^bf!-%)Zws0F%)N%Qlv?Kol5eJrN@rwZ$Tqc8moBq=XVS$3`<-juJl0 z@1Tp7nip7^HFvWWo_J6mxi*bt=6-XFw^5y&?FDzPo4|p|=h|%a-+Tmom~Q9aFKc+? zgRBB*O9uxD?|R)In2{51pW-|{FdN=dGi2|a`NB47$T&vJwYeg45$R~OZ7X27aJP%*Rw2H*~uc=qbIdQ z-22`tpXJD?xeFJrIrPZq=MJU&qWp$IJC-yMzW?5jK}7xv=OJa8fo(Mv=;AkCWuM#_hf7 zMEt&so0n3_CXxG=Zpzi{xyCEOh`FZS&!vTDhAHCuL-?C9!bdQWhP>lm|1$zkS%nGk zDoLJ4_sCi6?HN3L=bj8I@eI&W^c?=Anr;=VYE4`fW?R5r-Pk0>q z9MXHS^E93;Tr1D3=BeD#E0)ZgLlrgCLK#n@pQE>PvQo1z+@2@~sZy!%#J{)G%-S7r z-D~l%@E1^Cybv^L@C#=dyxn1NCZuTK;9Q{N#KB`Va68U->G@^BVRJ1zUxQz#a~HmF zG`w(Byl`~9aFo25&A!#)9Kj>wz;oN`39glH4K?{q$ooxKGfY>Hs(^s~z+|5yAk@!p z6?a(Uf7oi;T5`RJkYdHmW$rw1hEi&E;cC5ZRyEIy`SgKv*-hReY|F8jv=IttYS~NcAZ8N>uvmjWb zW_@**r;cajb6YdH#=FcZ!6ueedTJfY*VpX$YW1U`JWIJ54_gk+tzw6pH`d-Z4tyEs3?PDJf3MWSIPXNjv z#e-Jicv64*O@M!D^LDOX;8(fGQSR9L#QT?Q%N;BQ=84mML0bhD(C9V$BBOM@k zNT6PJW=3f_q;^gza#LSXz-Ty+#%1rXoZ%+mxR5EWM`gw((3S@LtdHXeZ>{H%HVB)G z8#Hv0sJzGt(ChNmc^G!N|RfBFBed-t)ugkE=pUicn#7IAD^#=pVL;7DKLxa|G#dS_+J5I<>|( zg}~FS&QHm6!>JJ=%uX9xj#qxZMhQ9-TJRMEkZe4lB3zJ?J~ zfC_A3k1Ab$wOmxz%hZ2IG}oC93WI&G?w(yGvwi2ctF6m2Zk3R1w4_F$VoQ&VznyS+ z6>Fv$ntVWteft?((sYk?Eb?00NW&l6_oES2 zqYd2es%{&aVrWqUH(J+FBn-;A3w1i`9tmJCirLMS=8Eu^{9Dtd9<)N!Z09s*7Ao=z z@x7X9NKB(fTFH&3ty4~b4GRWvRklw8x;mZD_xCnhH|GYmq3`eA@;lCQY`pC{m3j$+ zy~}HgDXWc!*Q=0?XplOGmaC~WHI?f>4${Sz*7VMK_oc4{-Op7?(%{S{P_rt3ZJnMX zCE_7EAl4YD9@ht5Nzf{E#^;>3HP=WFZ%<$ZZ+*E2DmJKUWJoeEs;4N2pM@kS&(8xT zSfZp$TK1tRq5u9eU10B4(ZyRiR!Qek+=;mqbQe(643nu-bLLpq^=@(ZoZ@G27gr(c z#|x;o)-+2}Gl`QEopxA1#$f3=G5LAk18uv*%H|L3@j&>G{zb@b9O zZU*|yXL5U(de{n0toPd2DrZ=}XLvvp-coykQrgac9y~xW4E37v2=RV2AnkZik;6-I z7NUk;?=yYR|AQ67y}PK;{0&mrp@2Ru^2^1=)yIYz!7OoI zFY)cw2Vz*C^_uNUOkK!mT?4-M`n+z%=YM7b(9Z8U{Jct32Cu-Y3eFLzV|zY?cSQZ& zbDpsFLr3@wVL-UO!=hkt-}j`-`{1tvbbHrH1a%EcXBS<`N%5{LS(~3c4@N}Tdz~uG!MOg@C$N@r0uTtw_aS>;6C2>FY(lDU$nKRil< zYb{?CUtkyBQEZr1+DZR_^mknwo|hI-G`pBnLKR|&Py&K)Nms7KkKOPsISe!t__#77 z;gNH3BdW26cL^%}{#)wShdY=h_>;>FwXS8g?-&E!p6FB8zxHns-tTtM7z5p!=wrZ2 zc8ogQGP!ZN&`+}IL=&2e$-tTj7 z+!l##J9=puJC{EmY^{^gtI`$Nol~4Mga^5G&&EL@j>Y@x9Fx_jC1oN+i6xDtWV3PT1TgH!UQ@a84_9wJ4$&h$_%YSK4I8gx3fY(#&;r7P$P5( z102R5kI@Bg(BCrIeVm;+Od=3N9#)$2C9j>jO=Ip&-rcI7-l3AAJrR zPsc-B?lP)kOpOly-L}lS;F1of?CuZtAO6hx_Q|&y47!lR%&C9|$-!_`Xjy6A3x(Ea zgFa^1!7a@T>XMHdJ>k`U$(vw$0k;F_^hY)q6-2HgutY@y^fDc0=g9`z+U}nDMK?PXEJ3>Y= z5CkMA`&CJ#ND7tP0Xazy7f5rP;3!~4Go}vJ3(!1_VI}SWyzT>YCr+WQE+&)JGaRFB zb*n@-*d{ymKsWRxDnfuR;TfR=5$Q>=WOA0bCV58U2eWe{2#`#>h*s}-U%L5;fj3=F zn$zPeq-6!5tlP0P)~ir+Hazq}v>P_t8=O6}_^bB4N}qEV;s;|#Pm;bQ#^^OWkYFUL z1uN`%BBNBs6KQEtRVS7!t>mP)Ca>W-h&Ev|?lP)e8IV0KJ%&@g#i33t)Ph}YovPgt 
zYLJWPEfpsx7bjQswe`QjvSE)}>|sIwRFDa=G#cnwqiOb3-tWlOw{zl2!0x>MneeKr zV>P*gUnBnl1O$b8B`%)UKpd0TyzEi?aA0~LcNB1f(iCRM)8*~TAF`{qwo5%`v%V@P zN8-4!Uco)s=qwt=M}i7$^77=gSW5*6C_B|bW}A^{3XT-Hh6~1G#%Z59*A4bO_|9fa z_l4~h>`}`v*Wwk|KxO6jaxsQ_TWf&A65OF-|SdU}{?lzH^pAS!b~ z=bZf}jz5yQkvLE#)H1_79r|XN(peK_I3C#T!2QbgvTf2zbWJNJi?n9H5u+s5*m~l$>ExL zyg-F(oyLUa-s$NK+d}9C%_BsNXub!P5So765@`VlAxG7~oJ$sreHLCCIsX@2d>+Fk zVuOluU?B8{W6oM@optP^-hlasDXh6%!>47m$hgAD#O|HD-Jemu*0@hW{Lc)pwaQ*d z-IK&>1**;SAsRRqhxZ^+gVZ3UyMt+PzSb7v+TIH3;-7#r!yxJtj_q!Buhi=mIydwf znQz6Zs9W#T2V?y0gN{_r^8})_e1(Av#Pcr6n8>F33y*BVq-BZrjW|7Ws7$U|Smh^*2SAzkf245c9BFDS0-g|dSVfA1G9Uil8pVkj6 z0s8}NEx(pQTug=P0tocas5lsPUPrEC@EW+_5Jde`kjF*!)ofk*38PM4w5K zjH&14fgjO&{cF1riSWEP;ySXg-Gt-t;tpAXr}%C>3K92u>d=~M7;~Dz|7)v(Uz#bK z$&tzZ;yK^j`wks_m3hP7%JwRl{oUP932 z9@qmz0}n5-5By0nM7x>2^AgcDA6Qh*p;iKdhHx*4s02h<>hvlQptK((fqu1G`AgKc z*`eBwvC>Hk`f%#Q9(2yzqNa_>xo_Vhez}`lZXw%viuhyaMT)PWro~W8X|?J)pm+Ld zpJZj=kQTZrloZJtF<-hBM*kXpDj zDrCXRt~Zi|c|>_4_wOZs3boTaFxR9e+vJKGwoN<8K1RWBs)W1`4c`VA)e~?+CWn_} zJH)7U>S>6-Mp?Ajw9iU(0b(j%FUjmDQh0cv-z(h>JP#4|_&tVrdB2V=$Aq_uG~6KK zR$UTw-0(*2L7y38o>VB;-;jM%G!AoZq(}%vjx8_7pm>v}{(c)mc76$w2+nuJ@C9xF zuM=Iyv@1kM+c)8utu{x|SQqo=#=B$F>6E||7>TNCp*PR~nA#stn*Ck+yGe*{6@rTW zyxF|?An^TrFMn{V*=rxj=vxw!tG9tU_W-y4f;f@^xISQ1j&BjSX;vpdCh#D$=QtUM zEUuBu?%ix8m|~2V<#li^04Kh~#1wTAO zpnQ#EUmu4wD8_3=IMc|oZ_Q->cnp&QsYEr40s$tHQ^CHiQTeyROGu~;mUmQg8I(2$ zQP>i2qo|u=s*6FdM#Zs2Qqi3SXFycvITqVv89ieed2Do)^Tb{msachskA|D#e!qmt z846__6ZF!>@DUe|GLJf7A7@{AJgO!G}l=N4nq9m?@c5!lFGZWOBtoRcmk1 zSKL-tYT*8r=<-o5Wb>N!nEVf%Iu0naaPATEPSHx0^)KZ75v>$VPl8+gcGpY&W7Y`sxgTN+(?=bakIc|&PT!gL8tlc%( zxRq-7TDZd9y5Izyc+QO-@W+Yb`zDXH%R}c1jR=vkxU;%8b@-CJOH`buL4a+0r$tS)0h@zcJx|4W-HHq?-8~|PA}`EJY%{Qb9?nh z5FuCVJE@q5KgAU0a6)9+kCcU#xbCj`4UOio>Ocrpjr z!N{6Tz!7KT-FOde<47BsPhFeEvCClN*Gu8+=+XCw&8}LCMx+#eh*bIAl;rLx66VwM z5?u3pvzPSwM<1;jwXK#$7ZB(hfGaE5R@KDvsl_DAQPj+bCd@+wKS6z;{b7{L%Awla z0S7l3ga>(*v}*cw+`D;PEnzr2vpDIx?cxK19-z`ljr?Cs*)#v;MBP96G&ZKc&(HrG zd>Y{=pGNqbPb2)V_%y=*Lq3g}mXVg>6HsGj!e?QmWnyDwV`TpgsL_AgXAE?ov=#%) zZ$OQn`A-n(KQyb~@YDYxq4qn+KPa}puu%VZM(xjX{|3YTzx!o>@uRr^igWP_7}(qB z>p9{Jn>jd|Sy~#||F*ek>G8=`&8!V=TpTF4f1_UiiFy5*&%ctZ{;1QxTlG&S?LV!y zf1ua?Q-b@m$lr1KO#L@h&Bk8I#>SFY)LzfZNcy+_h0pL=Ygz_wx_?gccPiwMZ~q-| z)Wbm&l@gJ7pZxim%eE!E`|KDQr{w(!(`2SOVAtzj1GW3+bO^+n;s%+tq)p=|2K)|B>={U<`g+e>V1i zY5t!rQZ%yr$E$zd_$=>_R{bU$ZEfuTAoTv9YVwa6{ZW&DD)oP?YkWRM{~dGjFYWsM zbo~QK$oq##$Y9LC!NOq7OvBE~!Air-tZz)CXUJ|qW5mM7Y`{owXvnH>@W%u5kKg^r zC+;7p!oTIqXvn~(&t}9(!)U<9Ov7xfXGFu!{Q1Yw!1yz3MneNbeU?A-{X28`XMO*5 zoc?z8{~vnq?+yO5_x*qH`M-tyTS5KbaQ$z%{;df7TZ#X-cl~d;{;df7TZ#X-cm0or z>z}ASLYL3c<-Zu=)u$M2h0nzJd#RcN-JdW0_2QCiyO!V}B z+?BO8vKBD-O(=h3WdGY2M9mzv=wzK7EzPWdCuI0Nd{&^7(=##p9GDo$sj?Hq#y_=pk&ENRuDu4 z1d*&{l^i64L?zvWJo@t9dcN;_Zr!ST|9Pt@&Y79+Idgh?=Jd|=ZyF>dep=UwqZ?Qr zu#!<|dl?yW;B>%~!~cZD#sA4MfMO!T|2p+RIWk92J(dPS_PVEZ-7TtLSc8RY@_opI z2RHF}-EOjEq`{u2xPHf32)wb9I@biy}y?k zcJP&I4s^ee(3yJw(dCc3ms=iRyQJC7O7A6k#3~cZ<|#7cHoBravEezI2%7e|{WX1k zaav(*;_9^KX5U7IZV&EX3N+Y75h z*iF~lHDl(vc}rg-pNs!=-OR`O=1?UU(UaB|>XAlSK5L`N>a%kKKa9wDd8-iOn)&J7 zP)W02|ECSxgg2Br719GM8iO8)&jRHWBQ?oS;qDQRs?QMa(+`qy2S$QZFXF(M?)d_X?L>?$nKRhNyyAXJ2oAR%N?jd%H$`A^x0T6aJzr=P@hs>o3k<`IZ3l zZ7#0GIrr1lPj~ZuyqeD74`p4-gafI_=cJB1(q^$W^b3l7gQX7a(J90(Q4qWz6>&s{ zA`iY}Tru$BIgo7)zCt=qsVku?-+BUFI{9uF-E4-6R^21(_j7E@)qL?BXjn;Z9$l1% z2=fNH@%V(K%~oFdQ{1>?+^F_6>ejEtd@}li2yMu8{KTipms~!s@l_+8=Mf-(T7<^+ zY?K%)DPp*t$T$N9yBAB$gg>#A!W*nZ_eMIpc0i_ilXRZ?kSOUyyF^ z5~N~v*%Y`*Z`AL6gB4Y2%M?Gq;xUp_AW5<3AJ7kMUW;iB-Nr8>RfjyzB1@6htjffJ zWCKp`tI5&zan+T9|tOJlp%4oe((?7=bdL54zRZK 
z=DfwW6N-Z`9Oh&9MX9p~Oi^3vw8_zM6MH+BfD6T!J3m6*OVuk?5;Uo#KQA*>(GWlj z!H89IE>bw{wRDeLX@)N=3PYD|0;rkL-v;PBJF6I?OecmI#lAll{u2E`kzD_Zb1TGy z%8kmp-w8%+MZa2Eq$W@GPx42vVk!Pf= zgwNhYb~8Tw-Y1#}ez4(m>+;pk7^}_QflJry7%1*>?;~}z1flDm7SS$z@2JN*o*ofj z_w1iYc|9h#SAyR&_mYC;UCUK`ki->`r4qikvRNqs$atVANc*#tA2FiMTm&1JzIzbd zz3xX&A(HoICF!<_SDR>4RPb`zJNSqD_Wbl>ozgvIdLPbhHhsOmA4W$RIC#FY?IHHX zd4=)Wx1Xv9{VEA0*dh#so--l}+nqA@{dcYyvQX;CYvbQ>r2KRou!GluUwWP3?%eGg ze*In_FaGf6yUp13gLsRdkG9jRGalALaEC0a=Z1S7_e(=hlrLA+l!8p76munZ5p-=f z9dMzhyU!l?cX};x^11jQ&xIA0fS&yz=;_JEvs@;t+XWWrb%(8()8fR`^m)TaB!s!8)n21>ga(*e#|LgDmKYw65Xc9eky!wm(o+YWXn^GCSu5AF{$P8806!E-fk{Z*6KwV^}=)rrF*Zlu~O@oBcd4(U7v?z9he@rqpG*H_H{r3hd=JZV{4KBw)_!xXvN* zhG8+<0nv`PamMbCYX7WV(tiXc85@f-d)KE^@mz+G>J$EhSBQ6YuZ-UEf#|0Mt+MXk zHPt3VO)>CmijKJ6whR>Lzr1hI(CwyFPpK!EHbQaGS47l9VNTRgrdE0$e$jjU&dN7M z+e@Czpl+w|$+YVZA;p4po6fI$UoI)h%p^+Z23?51gu*3X4*se1mgre1YU0c4pt9`^ z_~-tQb1r6|zPp;qjN}IQywR2)URxU4eyN;krPHd=1Dbf@7(QfS>4UqW74`GV+jxt@ zPCk)mIb4-yK?de}$lmG!c7{^&J0GNBLY)SeD}Cn&nhX`9*@9$axgH2&e8!2onUb2q zZy7&;(0wE^wecAJLJGm}x&;wzF}rAfOe409vXmvDf=iV5yKJ4A9?b|1cr?jqjb(Q({p3nj(>TTxkgV(nwy90C<%X%Tk zj<^NeECs|ztYSFwC{}hV3$N+VdgO!*t3!B;DpZyral0%tk^9OJvdt!kkod5i>z(P9 zxt}k>LUo?@Q!GA2OL-e`T{e~K_<~leS#|ES4G>5}$ZW&8MU9Vuy$I12-gAGe`)4*I z{%ibzsF>hc-S^kie*Q&}_@IZ!Ke)Q={#%x8`v|>lo89c);{`H}j}3)|1|RJ3MDfY) zElR3Zi+sp0iPagfTkuf3R$Yo=E$Mc6cB}D)Eq(jA&w;qFv-P_r41YS&x$ZaMD#wUN z216+vh1++%^slK~rTNqOq2}_Ye_;F10RI~emu{$bq*Mt#n|?qC z?cKoK{=Pp?HS@`!O*f&Z?}=sOTv*OSPna7{NY5lq4^r54*PoLn!Rf+JVon(vzOFAO z*?x`j30(~4q1#N=(^6VA92nt84!i+$0XuxWk8$`%@~^fM4ES8#W3qnHw&)>%=)GAS zrWR7Aq;r3ih=jzC*5fjLirQ$%Do6hc7F{Hne`ujR%5vQ2i015F(@ENADYLV?ZRbaXEe z%1EI4T5IP8*(4~*a8``$`491_Yd%`<&}s)=%xae^8bG|{&h`dTneQ@U&8Zv+8t)e_ zHuY`td%h#N5~b|qDv=Rr7`7>0-P+_54Ub#*H+bJ?xtfG%@M~o>)^f{sp2hNdl)p_< zlf2*}`#P{WcVUrUD-E_d8nRzjy7UBYaUJMmbAuWc#S0|2Uhcj)6t^rPjE_fNQZyfeX0DwBat)c zrD}IgEu=+7?j3VueIZtpPgu8Nl|fmj!eCnQnOXO2B=5Inxk}-l!OgVcWIvm5%=MAL z@&+S?AT)%_cpir5Q=KMA{hv?bm(_HShH$ojWk+Wpk_-f4j zQ9p?{>o}gfkEnO9Md&s@?z`aJIRsgmt_z=YSZRF!q#V1hc3`|l0Hs(KgxA}L(QGYPe^9s!%W^&f~%90UoR@n6+9~O z%*m;~`?8HRK`X_5@v+=oy_WmoF_N7=8rY!4$j%VxfbN3eRDUN znWdf((wcN+-RdjglI>D;Q{>UWlg%D$S7pzy#|(EOZ?O)M$>Oow_jrg3-_FB@XImBa z+^0B_*|r(Eh`WPH!oh=;r}?G1{4{!=<9_&nU>Q@A^~%7Eu&U30bGEU@qfjGFF zKjm7g%tSz@QXmz+LI6Rk^_^mLKMT1~mKPI5{Hh`0klmeH-XH1Z{Uo{42z52811@`e zqZcw>J!?hOsCq_H;SkAT;-dR^Vu-}h<^jvjZJE9EOo8Vv;trRC8&0P79N6^R z$wT?o+6WG3DCB9GV%67o_I{S7BVUV4(o{beS4SG9W;cEDn_$Z)feG=_`b|%~TqWwIJ!lhawSG8r zj?+H+U28_F4Yimf(K^^KmkKL!Q!~A<{AnYJEH<2YYxF&l29L-FXzAdfzN||?f9g^^_;8BeI~=QR7D4=<9k-fiXf`=`yVOG)o zb$11kihi(1?WEPH?OSFHh0M#^Q*w|K(gE&{8hUBw6w_pS=wP=_Yamu=YI zJMu`VU=>gEv0OQ^=x0mW$^KU9|512gG04AJHK+vHm@krK3MWc$B}dD1OH#4BcTbjH zI(p_PTI$29O3#E%5bGoL$YSfb`3J^IZjpl@Hi6Juujv{i^wq3O2+GNN=Xhhu$jqO$ zKl#yazy2ka!=VGWWPvVEBBpf_IqMDJax2MJg& z)k{6YY9+s$ju?j0!Bnd_QS{xzCnDvGVS%5XukXVcyk&Mb92jNt2P(fCn>=Ho zBI`A$BTQwl<$uPJ*cUm-4%I-4APVv$_}rPwP2$)lrGHS1S6(D|seR9hLB*DRcKU7^ zXnxZ{Ia8z3UVe|anRc$o8Nu3C+N>+2j@KF-dxw{6U`rrirsQep)3huNgNrxi>ep_M zfkD#WxJPobQF%!gRxjDE?nF-9sqG(=_{JC``kz}Eaq}G z;*lQ?BkQik(ScIuzN=dD)erf>hleGghrwW{S0$NNb;_0*mOPf}AG7?zt#URl=`0jF ze+HEudwm-fn`p5NbjW!*EMvY+&%WWL-rckU;1@RZ-G z`-S*8#@OsH`{V}cJwObW3G0q>! z_qofm#yl^23Yz6@PKYDpOO74ixFw+4RD8?#fD^O)b0P%uSlFxNa!VDL+{-R zmk@@^`iVx8Wx%0INMhb#Tj7iSX~(LwH?wzJ%$N`nyDVMyu_lT#p_d1<*sqtR&y}>? 
zl8Ru6;ER9Lip0v^(G_wtT*8*qhngsHO^r*~I=G9ChH4<~m@;J%>L1YkrV*mU)R{dz z^bR)aNk4M(#)=i@w9h4KbsynB`6eVF9C(Mzqi6pEkE_(~vs#?ArgAVzvfPTb3L}%t zBAz0zfJq1Rq4`DEANF$z2Mm;A!-DZ8O79<*BfkeTr$3PDD(bDJxu`GwN~mo0f_mEB z>vcf^KJ{iF*;X?AQ<$F3XvA9Fwa2)ywRev$yn9_|xxOp=g?{Q4H(9hneG>Tq&mlj?(zdexiG^V{6PVKz6p z3exPt6^baXx8ihdFMdMCd$U|^57lB8(Uz2b?lcDp94X(_7`akPf* zLv0x-hs12wj_nsElXi8^sfAL)sREMYbW<=YVURDQ`h$hCc*`5M0wL!74VJQPD|H!0 z`en4Gdj&12*bV>h6ogmS`i4I#-HakTwV%CAt45{Ev``gcw)@bY`u`VeCngm!94beR$7nFW<~A!H+wCChA0QwQ_&ShIx)0U|xx+Co<$ZmwU-`xjSty`2@ z&M}&4@UuX7kg8uMm%nzq`*>dU~pV>YQW-i=jmN5 z2&Uv~b8_r9Yuz^RUJ0nix;Q#_W%yx)9*W}yXWn?2_dK+QrFC))mqkI~I} zA?u21iixQSQwn49ZSDNyS9@X{t1DMtlkREaF&oMmbh4QJQw%#*uu1 z2l6_mOr5)o!ww5@db9D)$6?ARl7U#q`QCjf6nejgB`K~Z*hwP*X3iUFU;( z9+cXY?e2425P2-PtZDMZDUN$P9Y^E?vGUgpyqS5Kpe!of3qD$3w#WD9?oghaN@d8E zEEnHRKX3*s9z(u?DX zmEr9LTTxLA;?YMCwFglO$qEdUk=~>pKDQHcOACKd_NoIe*G(dEa)F?Z3CS9g)gU4( zf_r!7tqALJH>n8VmxUC9XD;h2pBD=11QFU&*2>CgfoLgU@@`a=7vySt>GO$s!+0}I z@~MB)OP(_idM=0GEA2p=5^_6iteIFXq@YZwn4GzpY?x%{?wv^ySC%J)^Nl~&HEQsl z--&&@vVLw)(w~SmIK8=&_+|t#xwbqRHQ`jq-eqcKz9uD6P?)lWPIwi_Tp5fx&nTN4 z=fzDH6_S-rru~9jQ;#?*?3TRqO}cDeZ6)oMeAa&cer`v?Z4!0GYnkM|q7L9KUM!0w zjd_rOl32#)-bcM&z0=>sE|T57+xON8E(Mu8Z5DAsFfTZ9GV%@D_5i7t?UhBzc3Iv*n)HAQB9KlT!FJsa=c9% z=GNxu6M%4U$Xx&za4fLb8VD&>n0?QgA#X?W)F-@1h^EzbyrY-c&r<*|Fh)jI6;!2T zE3oC*hgEs78eYI)wVg2a* z!+u6%#t}qCW`%KueucfQk^`}N>KsfpE3sFj_pZa*BW|#pNt1DfjY>!STMliH^Mi~x9I0p@(@D#s|ML>*`_v%y853Cp@x;ln+^09 z(V29m)tW_yP8z04*VX(|J;Qf$VxGi2(umQh?3OnN&JA`+ae2DxEPanv9<03waGzH1g|5xO{x(L-<{IY}{ujnrQ~#=}iW$=vMjFOQ6PT&KDLFKsEKm_L9VIe4I^vtXja>2)O%laRr4fBI z%rwe5DmtPeFl5|qZ1KjwQ{!t4($GoFTJvS>iyO8#rZVOlvH9-aQ~8tcJyEVtTtB+s za+RFhY8q)tUi&g9HE%s#F*aL&ZEA7SP=QRD4!NSh4`B&-g*CPR~%}q8>+k7;u z@?Orn^uWOgaFSv6P$y{--IC#!_rv--_19jf?|s`}4|wGdAE#^Z^JjbBZ_;i$GD&n$ zf3UMB{e5UdbMsgRhh~CJi*8-GS6EQ2Of2F8EnPqD7U!@;4f~#KgiC~A&UHN@VWBDr zR-jX)SQurSxKX%W^gR4psC)DEt7X)g)a2BaMPCip%%xe8)W%d2Rf_iNgDFK$bc38F z(li{5@9s%5-jAV(c8NM_+_D*j*xS@w7uwhv2^0qu&!dz)8U3Pc+IVKwT1MX`wQ97E zyjzJ(jq-{B$EMG{a=lgyrjep?yfAm+@IoxR0DBlmHRp7E_Cw7LTkpvFG}^?3;)Gzm zJN+U3al)xpuaQuH^lf>`ga*#nx;N<#)wW4I!c4=m8Fv`<8PQOAsHUBY{YJ&litS1- zTg{o8dH>fG)iKr2uN(4vxvzfn&D*YK&LD>{zY=pqb&iWSwA*~@>k93PObvVwZNJv# z_nHHxYS@4|$~`DAVKq%X&rZ5`uK6?SBcP#iV4J;~S z%QS1yIZb(4?r-Lf%=d5TwHl5UV4gdc7!;0aOM3+cSgmJFzfEGAP032x4X+7b9rW#g zF@zjSWNT&V7C)$xscP7DY8|>yPD&0VXNz5rxyM2-HTub6V0bvEU+*KC*!k4F7#M5a zkNZnvPx=`W=&7jtnXE#v)^2joD9g?XP>t1@G=?Kf2lo6`Z;rFc_%q$FD9;g zy*pU-&Q- z2YT=o&@U3K)9+a1M(SE8PS*&Kh=33P^ALo9M43^lkR$-} z5E1$qHu(hBFzc9pF4Df2V6XmzH6&4kR*ud=&f{~E7a95_s=e>2cl>XR#A)hx6aF0)aawi; zzjQKcz<*(*fbsgDu=c;_6X5!5c%4cAKfnD?1O6j{|486J68Mh<{v(0^{}TAeVeL;H z>;Lj!RFLHcDzz%LIyJK&!^hzG2o0|dN>xd1`#0b>Wa1?(!o$oZWCIOd=D_3FsT zfECd$SfKnDKUC=C*NYC=1g>BWkev9;10*LzNCe8(-PHVF9`&ac{B8i!&JKY=xY!`v zO@)NP)))Yq3buh`&@NybBnGg)>`(}Auq_%3x3K{XF|dP&i#;6U;f#WNV8MWe<$`bm zV}N#o0T8w=0tA%+yLq6o2zMY0g#by2g6%PIPXq`eBo4Or0ASi!ur1u)9)bDmVrz{8 zBTy)$t2@#ijBvJvyE}jZ$N!Uy9SRLp47S6-f8h(j_8v&ouWC^UJM6zw7^J-e7VM04 z@o)#bA~0A7w1+!jES@$4sNNdxj`)-K#rms(K*=fRuhhTt&t(33*e`Z025yURhGU$- zc1WOIU^REtNeeY$VBJ$u8QUTOA)dU0$!P{4FciYw9SKIAlA;k{_tO&JUl$NWR1ggF zz@UK}VX%z{=A;XJfP^T}1!yOPi!~fThKq`W|5R*)cJ(=J92zi45x@%r_zDn2Ob84Z zx=0&1$_0%D3jqIY5q4kfPGl>JG;Z9IU@7iSL;L>zh|QY6~;r0ajxW`nQ=!1i!3(C>fff%Z7V-EBNh03QI4 z_(`Fg2OI-%oZLFVQFf@QUU%l>(F*x(6d!VpL zSCkJ}^R#pIPg(U(S@qAb>i^*}J}uO9Kx16Mfc`rJI%Sv z_@64^zr+9shUafF0DTG!E+DG#uO363VFtSCFNH*&(jZT1kY{L+e`pYZ7+g*thdw1o zpOT}`kfZ-1_`_w3^h5#;M4t-5UN`JHVJ{)PPcJ;9xs zsD#gK3o0Oh2@#Bn&4iW)t0ZNXv0RZg49P?|sJJ~)2E24nk*rzOD z6$Mi%MNtu12}PKMh@70LqM)FVoPvU=h=QV`I20459pz^X%Q6U9k 
zQE_o$n3#yT2(Y*!58Og!%)u%yc4z>tbmF0<1OB~2BnAQNpwU<$|771@2dsz3W-!sq#? zt9844-P$-*_|I_sw2={Aj#ECdk0CILBRx)5`+Mcwl)07>Bc3*L32lbv=cxE`sJR|* z%PB*cO@kcgoOLbbec<1d#i3Se{Ik^(;g1(Zb7Q`D2dk@DbV#g6b~R=7RFQ(m$>lub zj#D2WEfR$KhrIE$yW#tDKfnFG-e|{Lt*?S2dS+{a)MG}3uea6P@UCtP8gCpjKgqam zHX8_fuD8E#udM47FrLbKb>fG4mz@#kXV#vTXbL;iM~}%NTxRHP5(JU)^RjOpluewI z($vk!l-1F${oFySgDw8aw(+%2JC)0dN#CCRTz&67+IO+N&w0CqA3SPTc)RvE6E!Ig zt>Tm66|tYv3_BMKYK@mJ%C+@MZ7_FxbW6gU&I+fG#x>?|R{Y$tz$E33LD-R#ia{WP P!sMhJ9578qa?<|=v^&rH literal 0 HcmV?d00001 diff --git a/examples/vmdq_dcb/Makefile b/examples/vmdq_dcb/Makefile new file mode 100644 index 0000000000..82b1981571 --- /dev/null +++ b/examples/vmdq_dcb/Makefile @@ -0,0 +1,59 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = vmdq_dcb_app + +# all source are stored in SRCS-y +SRCS-y := main.c + +CFLAGS += $(WERROR_FLAGS) + +# workaround for a gcc bug with noreturn attribute +# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603 +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_main.o += -Wno-return-type +endif + +EXTRA_CFLAGS += -O3 -g + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c new file mode 100644 index 0000000000..634ebc51a6 --- /dev/null +++ b/examples/vmdq_dcb/main.c @@ -0,0 +1,331 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +/* basic constants used in application */ +#define SOCKET0 0 +#define SOCKET1 1 + +#define NUM_QUEUES 128 + +#define NUM_MBUFS 64*1024 +#define MBUF_CACHE_SIZE 64 +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) + +/* Basic application settings */ +#define NUM_POOLS ETH_16_POOLS /* can be ETH_16_POOLS or ETH_32_POOLS */ + +#define RX_PORT 0 +#define TX_PORT 1 + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +/* Default configuration for rx and tx thresholds etc. */ +static const struct rte_eth_rxconf rx_conf_default = { + .rx_thresh = { + .pthresh = 8, + .hthresh = 8, + .wthresh = 4, + }, +}; + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +static const struct rte_eth_txconf tx_conf_default = { + .tx_thresh = { + .pthresh = 36, + .hthresh = 0, + .wthresh = 0, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +/* empty vmdq+dcb configuration structure. 
Filled in programatically */ +static const struct rte_eth_conf vmdq_dcb_conf_default = { + .rxmode = { + .mq_mode = ETH_VMDQ_DCB, + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 0, /**< IP checksum offload disabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + }, + .txmode = { + }, + .rx_adv_conf = { + /* + * should be overridden separately in code with + * appropriate values + */ + .vmdq_dcb_conf = { + .nb_queue_pools = NUM_POOLS, + .enable_default_pool = 0, + .default_pool = 0, + .nb_pool_maps = 0, + .pool_map = {{0, 0},}, + .dcb_queue = {0}, + }, + }, +}; + +/* array used for printing out statistics */ +volatile unsigned long rxPackets[ NUM_QUEUES ] = {0}; + +const uint16_t vlan_tags[] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31 +}; + +/* Builds up the correct configuration for vmdq+dcb based on the vlan tags array + * given above, and the number of traffic classes available for use. */ +static inline int +get_eth_conf(struct rte_eth_conf *eth_conf, enum rte_eth_nb_pools num_pools) +{ + struct rte_eth_vmdq_dcb_conf conf; + unsigned i; + + if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS ) return -1; + + conf.nb_queue_pools = num_pools; + conf.enable_default_pool = 0; + conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]); + for (i = 0; i < conf.nb_pool_maps; i++){ + conf.pool_map[i].vlan_id = vlan_tags[ i ]; + conf.pool_map[i].pools = 1 << (i % num_pools); + } + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){ + conf.dcb_queue[i] = (uint8_t)(i % (NUM_QUEUES/num_pools)); + } + rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf)); + rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &conf, + sizeof(eth_conf->rx_adv_conf.vmdq_dcb_conf)); + return 0; +} + +/* + * Initialises a given port using global settings and with the rx buffers + * coming from the mbuf_pool passed as parameter + */ +static inline int +port_init(uint8_t port, struct rte_mempool *mbuf_pool) +{ + struct rte_eth_conf port_conf; + const uint16_t rxRings = ETH_VMDQ_DCB_NUM_QUEUES, + txRings = (uint16_t)rte_lcore_count(); + const uint16_t rxRingSize = 128, txRingSize = 512; + int retval; + uint16_t q; + + get_eth_conf(&port_conf, NUM_POOLS); + + if (port >= rte_eth_dev_count()) return -1; + + retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf); + if (retval != 0) + return retval; + + for (q = 0; q < rxRings; q ++) { + retval = rte_eth_rx_queue_setup(port, q, rxRingSize, + SOCKET0, &rx_conf_default, + mbuf_pool); + if (retval < 0) + return retval; + } + + for (q = 0; q < txRings; q ++) { + retval = rte_eth_tx_queue_setup(port, q, txRingSize, + SOCKET0, &tx_conf_default); + if (retval < 0) + return retval; + } + + retval = rte_eth_dev_start(port); + if (retval < 0) + return retval; + + return 0; +} + +#ifndef RTE_EXEC_ENV_BAREMETAL +/* When we receive a HUP signal, print out our stats */ +static void +sighup_handler(int signum) +{ + unsigned q; + for (q = 0; q < NUM_QUEUES; q ++) { + if (q % (NUM_QUEUES/NUM_POOLS) == 0) + printf("\nPool %u: ", q/(NUM_QUEUES/NUM_POOLS)); + printf("%lu ", rxPackets[ q ]); + } + printf("\nFinished handling signal %d\n", signum); +} +#endif + +/* + * Main thread that does the work, reading from INPUT_PORT + * and writing to OUTPUT_PORT + */ +static __attribute__((noreturn)) int +lcore_main(void *arg) +{ + const uintptr_t core_num = (uintptr_t)arg; + const 
unsigned num_cores = rte_lcore_count(); + uint16_t startQueue = (uint16_t)(core_num * (NUM_QUEUES/num_cores)); + uint16_t endQueue = (uint16_t)(startQueue + (NUM_QUEUES/num_cores)); + uint16_t q, i; + + printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num, + rte_lcore_id(), startQueue, endQueue - 1); + + for (;;) { + struct rte_mbuf *buf[32]; + const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]); + + for (q = startQueue; q < endQueue; q++) { + const uint16_t rxCount = rte_eth_rx_burst(RX_PORT, + q, buf, buf_size); + if (rxCount == 0) + continue; + rxPackets[q] += rxCount; + + const uint16_t txCount = rte_eth_tx_burst(TX_PORT, + (uint16_t)core_num, buf, rxCount); + if (txCount != rxCount) { + for (i = txCount; i < rxCount; i++) + rte_pktmbuf_free(buf[i]); + } + } + } +} + +/* Main function, does initialisation and calls the per-lcore functions */ +int +MAIN(int argc, char *argv[]) +{ + unsigned cores; + struct rte_mempool *mbuf_pool; + unsigned lcore_id; + uintptr_t i; + +#ifndef RTE_EXEC_ENV_BAREMETAL + signal(SIGHUP, sighup_handler); +#endif + + if (rte_eal_init(argc, argv) < 0) + rte_exit(EXIT_FAILURE, "Error with EAL initialization\n"); + if (rte_igb_pmd_init() != 0 || + rte_ixgbe_pmd_init() != 0 || + rte_eal_pci_probe() != 0) + rte_exit(EXIT_FAILURE, "Error with NIC driver initialization\n"); + + cores = rte_lcore_count(); + if ((cores & (cores - 1)) != 0 || cores > 16) { + rte_exit(EXIT_FAILURE, + "This program can only run on 2,4,8 or 16 cores\n\n"); + } + + mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS, + MBUF_SIZE, MBUF_CACHE_SIZE, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + SOCKET0, 0); + if (mbuf_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); + + if (port_init(RX_PORT, mbuf_pool) != 0 || + port_init(TX_PORT, mbuf_pool) != 0) + rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n"); + + /* call lcore_main() on every slave lcore */ + i = 0; + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id); + } + /* call on master too */ + (void) lcore_main((void*)i); + + return 0; +} diff --git a/examples/vmdq_dcb/main.h b/examples/vmdq_dcb/main.h new file mode 100644 index 0000000000..ad1b4b300e --- /dev/null +++ b/examples/vmdq_dcb/main.h @@ -0,0 +1,48 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN( int argc, char *argv[] ); + +#endif /* ifndef _MAIN_H_ */ diff --git a/lib/Makefile b/lib/Makefile new file mode 100644 index 0000000000..4cf1892da6 --- /dev/null +++ b/lib/Makefile @@ -0,0 +1,51 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +DIRS-$(CONFIG_RTE_LIBC) += libc +DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal +DIRS-$(CONFIG_RTE_LIBRTE_MALLOC) += librte_malloc +DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring +DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool +DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf +DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer +DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline +DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether +DIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += librte_pmd_igb +DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += librte_pmd_ixgbe +DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash +DIRS-$(CONFIG_RTE_LIBRTE_LPM) += librte_lpm +DIRS-$(CONFIG_RTE_LIBRTE_NET) += librte_net + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/lib/librte_cmdline/Makefile b/lib/librte_cmdline/Makefile new file mode 100644 index 0000000000..faeb9f3bff --- /dev/null +++ b/lib/librte_cmdline/Makefile @@ -0,0 +1,65 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. 
All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_cmdline.a + +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) := cmdline.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_cirbuf.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_etheraddr.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_ipaddr.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_num.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_string.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_rdline.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_vt100.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_socket.c +SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_portlist.c + +CFLAGS_cmdline.o := -D_GNU_SOURCE + +# install includes +INCS := cmdline.h cmdline_parse.h cmdline_parse_num.h cmdline_parse_ipaddr.h +INCS += cmdline_parse_etheraddr.h cmdline_parse_string.h cmdline_rdline.h +INCS += cmdline_vt100.h cmdline_socket.h cmdline_cirbuf.h cmdline_parse_portlist.h +SYMLINK-$(CONFIG_RTE_LIBRTE_CMDLINE)-include := $(INCS) + +# this lib needs eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += lib/librte_eal + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_cmdline/cmdline.c b/lib/librte_cmdline/cmdline.c new file mode 100644 index 0000000000..dafddfcd85 --- /dev/null +++ b/lib/librte_cmdline/cmdline.c @@ -0,0 +1,240 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "cmdline_parse.h" +#include "cmdline_rdline.h" +#include "cmdline.h" + +static void +cmdline_valid_buffer(struct rdline *rdl, const char *buf, + __attribute__((unused)) unsigned int size) +{ + struct cmdline *cl = rdl->opaque; + int ret; + ret = cmdline_parse(cl, buf); + if (ret == CMDLINE_PARSE_AMBIGUOUS) + cmdline_printf(cl, "Ambiguous command\n"); + else if (ret == CMDLINE_PARSE_NOMATCH) + cmdline_printf(cl, "Command not found\n"); + else if (ret == CMDLINE_PARSE_BAD_ARGS) + cmdline_printf(cl, "Bad arguments\n"); +} + +static int +cmdline_complete_buffer(struct rdline *rdl, const char *buf, + char *dstbuf, unsigned int dstsize, + int *state) +{ + struct cmdline *cl = rdl->opaque; + return cmdline_complete(cl, buf, state, dstbuf, dstsize); +} + +int +cmdline_write_char(struct rdline *rdl, char c) +{ + int ret = -1; + struct cmdline *cl = rdl->opaque; + + if (cl->s_out >= 0) + ret = write(cl->s_out, &c, 1); + + return ret; +} + + +void +cmdline_set_prompt(struct cmdline *cl, const char *prompt) +{ + rte_snprintf(cl->prompt, sizeof(cl->prompt), "%s", prompt); +} + +struct cmdline * +cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out) +{ + struct cmdline *cl; + cl = malloc(sizeof(struct cmdline)); + if (cl == NULL) + return NULL; + memset(cl, 0, sizeof(struct cmdline)); + cl->s_in = s_in; + cl->s_out = s_out; + cl->ctx = ctx; + + rdline_init(&cl->rdl, cmdline_write_char, + cmdline_valid_buffer, cmdline_complete_buffer); + cl->rdl.opaque = cl; + cmdline_set_prompt(cl, prompt); + rdline_newline(&cl->rdl, cl->prompt); + + return cl; +} + +void +cmdline_free(struct cmdline *cl) +{ + dprintf("called\n"); + if (cl->s_in > 2) + close(cl->s_in); + if (cl->s_out != cl->s_in && cl->s_out > 2) + close(cl->s_out); + free(cl); +} + +void +cmdline_printf(const struct cmdline *cl, const char *fmt, ...) 
+{ + va_list ap; + +#ifdef _GNU_SOURCE + if (cl->s_out < 0) + return; + va_start(ap, fmt); + vdprintf(cl->s_out, fmt, ap); + va_end(ap); +#else + int ret; + char *buf; + + if (cl->s_out < 0) + return; + + buf = malloc(BUFSIZ); + if (buf == NULL) + return; + va_start(ap, fmt); + ret = vsnprintf(buf, BUFSIZ, fmt, ap); + va_end(ap); + if (ret < 0) + return; + if (ret >= BUFSIZ) + ret = BUFSIZ - 1; + write(cl->s_out, buf, ret); + free(buf); +#endif +} + +int +cmdline_in(struct cmdline *cl, const char *buf, int size) +{ + const char *history, *buffer; + size_t histlen, buflen; + int ret = 0; + int i, same; + + for (i=0; irdl, buf[i]); + + if (ret == RDLINE_RES_VALIDATED) { + buffer = rdline_get_buffer(&cl->rdl); + history = rdline_get_history_item(&cl->rdl, 0); + if (history) { + histlen = strnlen(history, RDLINE_BUF_SIZE); + same = !memcmp(buffer, history, histlen) && + buffer[histlen] == '\n'; + } + else + same = 0; + buflen = strnlen(buffer, RDLINE_BUF_SIZE); + if (buflen > 1 && !same) + rdline_add_history(&cl->rdl, buffer); + rdline_newline(&cl->rdl, cl->prompt); + } + else if (ret == RDLINE_RES_EOF) + return -1; + else if (ret == RDLINE_RES_EXITED) + return -1; + } + return i; +} + +void +cmdline_quit(struct cmdline *cl) +{ + rdline_quit(&cl->rdl); +} + +void +cmdline_interact(struct cmdline *cl) +{ + char c; + + c = -1; + while (1) { + if (read(cl->s_in, &c, 1) < 0) + break; + if (cmdline_in(cl, &c, 1) < 0) + break; + } +} diff --git a/lib/librte_cmdline/cmdline.h b/lib/librte_cmdline/cmdline.h new file mode 100644 index 0000000000..5754afeca5 --- /dev/null +++ b/lib/librte_cmdline/cmdline.h @@ -0,0 +1,94 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CMDLINE_H_ +#define _CMDLINE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +struct cmdline { + int s_in; + int s_out; + cmdline_parse_ctx_t *ctx; + struct rdline rdl; + char prompt[RDLINE_PROMPT_SIZE]; +#ifdef RTE_EXEC_ENV_LINUXAPP + struct termios oldterm; +#endif +}; + +struct cmdline *cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out); +void cmdline_set_prompt(struct cmdline *cl, const char *prompt); +void cmdline_free(struct cmdline *cl); +void cmdline_printf(const struct cmdline *cl, const char *fmt, ...); +int cmdline_in(struct cmdline *cl, const char *buf, int size); +int cmdline_write_char(struct rdline *rdl, char c); +void cmdline_interact(struct cmdline *cl); +void cmdline_quit(struct cmdline *cl); + +#ifdef __cplusplus +} +#endif + +#endif /* _CMDLINE_SOCKET_H_ */ diff --git a/lib/librte_cmdline/cmdline_cirbuf.c b/lib/librte_cmdline/cmdline_cirbuf.c new file mode 100644 index 0000000000..ccc51fc40c --- /dev/null +++ b/lib/librte_cmdline/cmdline_cirbuf.c @@ -0,0 +1,434 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include + +#include "cmdline_cirbuf.h" + + +void +cirbuf_init(struct cirbuf *cbuf, char *buf, unsigned int start, unsigned int maxlen) +{ + cbuf->maxlen = maxlen; + cbuf->len = 0; + cbuf->start = start; + cbuf->end = start; + cbuf->buf = buf; +} + +/* multiple add */ + +int +cirbuf_add_buf_head(struct cirbuf *cbuf, const char *c, unsigned int n) +{ + unsigned int e; + + if (!n || n > CIRBUF_GET_FREELEN(cbuf)) + return -EINVAL; + + e = CIRBUF_IS_EMPTY(cbuf) ? 
1 : 0; + + if (n < cbuf->start + e) { + dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->start - n + e, n); + memcpy(cbuf->buf + cbuf->start - n + e, c, n); + } + else { + dprintf("s[%d] -> d[%d] (%d)\n", + n - (cbuf->start + e), 0, + cbuf->start + e); + dprintf("s[%d] -> d[%d] (%d)\n", cbuf->maxlen - n + + (cbuf->start + e), 0, n - (cbuf->start + e)); + memcpy(cbuf->buf, c + n - (cbuf->start + e) , cbuf->start + e); + memcpy(cbuf->buf + cbuf->maxlen - n + (cbuf->start + e), c, + n - (cbuf->start + e)); + } + cbuf->len += n; + cbuf->start += (cbuf->maxlen - n + e); + cbuf->start %= cbuf->maxlen; + return n; +} + +/* multiple add */ + +int +cirbuf_add_buf_tail(struct cirbuf *cbuf, const char *c, unsigned int n) +{ + unsigned int e; + + if (!n || n > CIRBUF_GET_FREELEN(cbuf)) + return -EINVAL; + + e = CIRBUF_IS_EMPTY(cbuf) ? 1 : 0; + + if (n < cbuf->maxlen - cbuf->end - 1 + e) { + dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->end + !e, n); + memcpy(cbuf->buf + cbuf->end + !e, c, n); + } + else { + dprintf("s[%d] -> d[%d] (%d)\n", cbuf->end + !e, 0, + cbuf->maxlen - cbuf->end - 1 + e); + dprintf("s[%d] -> d[%d] (%d)\n", cbuf->maxlen - cbuf->end - 1 + + e, 0, n - cbuf->maxlen + cbuf->end + 1 - e); + memcpy(cbuf->buf + cbuf->end + !e, c, cbuf->maxlen - + cbuf->end - 1 + e); + memcpy(cbuf->buf, c + cbuf->maxlen - cbuf->end - 1 + e, + n - cbuf->maxlen + cbuf->end + 1 - e); + } + cbuf->len += n; + cbuf->end += n - e; + cbuf->end %= cbuf->maxlen; + return n; +} + +/* add at head */ + +static inline void +__cirbuf_add_head(struct cirbuf * cbuf, char c) +{ + if (!CIRBUF_IS_EMPTY(cbuf)) { + cbuf->start += (cbuf->maxlen - 1); + cbuf->start %= cbuf->maxlen; + } + cbuf->buf[cbuf->start] = c; + cbuf->len ++; +} + +int +cirbuf_add_head_safe(struct cirbuf * cbuf, char c) +{ + if (cbuf && !CIRBUF_IS_FULL(cbuf)) { + __cirbuf_add_head(cbuf, c); + return 0; + } + return -EINVAL; +} + +void +cirbuf_add_head(struct cirbuf * cbuf, char c) +{ + __cirbuf_add_head(cbuf, c); +} + +/* add at tail */ + +static inline void +__cirbuf_add_tail(struct cirbuf * cbuf, char c) +{ + if (!CIRBUF_IS_EMPTY(cbuf)) { + cbuf->end ++; + cbuf->end %= cbuf->maxlen; + } + cbuf->buf[cbuf->end] = c; + cbuf->len ++; +} + +int +cirbuf_add_tail_safe(struct cirbuf * cbuf, char c) +{ + if (cbuf && !CIRBUF_IS_FULL(cbuf)) { + __cirbuf_add_tail(cbuf, c); + return 0; + } + return -EINVAL; +} + +void +cirbuf_add_tail(struct cirbuf * cbuf, char c) +{ + __cirbuf_add_tail(cbuf, c); +} + + +static inline void +__cirbuf_shift_left(struct cirbuf *cbuf) +{ + unsigned int i; + char tmp = cbuf->buf[cbuf->start]; + + for (i=0 ; ilen ; i++) { + cbuf->buf[(cbuf->start+i)%cbuf->maxlen] = + cbuf->buf[(cbuf->start+i+1)%cbuf->maxlen]; + } + cbuf->buf[(cbuf->start-1+cbuf->maxlen)%cbuf->maxlen] = tmp; + cbuf->start += (cbuf->maxlen - 1); + cbuf->start %= cbuf->maxlen; + cbuf->end += (cbuf->maxlen - 1); + cbuf->end %= cbuf->maxlen; +} + +static inline void +__cirbuf_shift_right(struct cirbuf *cbuf) +{ + unsigned int i; + char tmp = cbuf->buf[cbuf->end]; + + for (i=0 ; ilen ; i++) { + cbuf->buf[(cbuf->end+cbuf->maxlen-i)%cbuf->maxlen] = + cbuf->buf[(cbuf->end+cbuf->maxlen-i-1)%cbuf->maxlen]; + } + cbuf->buf[(cbuf->end+1)%cbuf->maxlen] = tmp; + cbuf->start += 1; + cbuf->start %= cbuf->maxlen; + cbuf->end += 1; + cbuf->end %= cbuf->maxlen; +} + +/* XXX we could do a better algorithm here... 
*/ +void cirbuf_align_left(struct cirbuf * cbuf) +{ + if (cbuf->start < cbuf->maxlen/2) { + while (cbuf->start != 0) { + __cirbuf_shift_left(cbuf); + } + } + else { + while (cbuf->start != 0) { + __cirbuf_shift_right(cbuf); + } + } +} + +/* XXX we could do a better algorithm here... */ +void cirbuf_align_right(struct cirbuf * cbuf) +{ + if (cbuf->start >= cbuf->maxlen/2) { + while (cbuf->end != cbuf->maxlen-1) { + __cirbuf_shift_left(cbuf); + } + } + else { + while (cbuf->start != cbuf->maxlen-1) { + __cirbuf_shift_right(cbuf); + } + } +} + +/* buffer del */ + +int +cirbuf_del_buf_head(struct cirbuf *cbuf, unsigned int size) +{ + if (!size || size > CIRBUF_GET_LEN(cbuf)) + return -EINVAL; + + cbuf->len -= size; + if (CIRBUF_IS_EMPTY(cbuf)) { + cbuf->start += size - 1; + cbuf->start %= cbuf->maxlen; + } + else { + cbuf->start += size; + cbuf->start %= cbuf->maxlen; + } + return 0; +} + +/* buffer del */ + +int +cirbuf_del_buf_tail(struct cirbuf *cbuf, unsigned int size) +{ + if (!size || size > CIRBUF_GET_LEN(cbuf)) + return -EINVAL; + + cbuf->len -= size; + if (CIRBUF_IS_EMPTY(cbuf)) { + cbuf->end += (cbuf->maxlen - size + 1); + cbuf->end %= cbuf->maxlen; + } + else { + cbuf->end += (cbuf->maxlen - size); + cbuf->end %= cbuf->maxlen; + } + return 0; +} + +/* del at head */ + +static inline void +__cirbuf_del_head(struct cirbuf * cbuf) +{ + cbuf->len --; + if (!CIRBUF_IS_EMPTY(cbuf)) { + cbuf->start ++; + cbuf->start %= cbuf->maxlen; + } +} + +int +cirbuf_del_head_safe(struct cirbuf * cbuf) +{ + if (cbuf && !CIRBUF_IS_EMPTY(cbuf)) { + __cirbuf_del_head(cbuf); + return 0; + } + return -EINVAL; +} + +void +cirbuf_del_head(struct cirbuf * cbuf) +{ + __cirbuf_del_head(cbuf); +} + +/* del at tail */ + +static inline void +__cirbuf_del_tail(struct cirbuf * cbuf) +{ + cbuf->len --; + if (!CIRBUF_IS_EMPTY(cbuf)) { + cbuf->end += (cbuf->maxlen - 1); + cbuf->end %= cbuf->maxlen; + } +} + +int +cirbuf_del_tail_safe(struct cirbuf * cbuf) +{ + if (cbuf && !CIRBUF_IS_EMPTY(cbuf)) { + __cirbuf_del_tail(cbuf); + return 0; + } + return -EINVAL; +} + +void +cirbuf_del_tail(struct cirbuf * cbuf) +{ + __cirbuf_del_tail(cbuf); +} + +/* convert to buffer */ + +int +cirbuf_get_buf_head(struct cirbuf *cbuf, char *c, unsigned int size) +{ + unsigned int n; + + n = (size < CIRBUF_GET_LEN(cbuf)) ? size : CIRBUF_GET_LEN(cbuf); + + if (!n) + return 0; + + if (cbuf->start <= cbuf->end) { + dprintf("s[%d] -> d[%d] (%d)\n", cbuf->start, 0, n); + memcpy(c, cbuf->buf + cbuf->start , n); + } + else { + dprintf("s[%d] -> d[%d] (%d)\n", cbuf->start, 0, + cbuf->maxlen - cbuf->start); + dprintf("s[%d] -> d[%d] (%d)\n", 0,cbuf->maxlen - cbuf->start, + n - cbuf->maxlen + cbuf->start); + memcpy(c, cbuf->buf + cbuf->start , cbuf->maxlen - cbuf->start); + memcpy(c + cbuf->maxlen - cbuf->start, cbuf->buf, + n - cbuf->maxlen + cbuf->start); + } + return n; +} + +/* convert to buffer */ + +int +cirbuf_get_buf_tail(struct cirbuf *cbuf, char *c, unsigned int size) +{ + unsigned int n; + + n = (size < CIRBUF_GET_LEN(cbuf)) ? 
size : CIRBUF_GET_LEN(cbuf); + + if (!n) + return 0; + + if (cbuf->start <= cbuf->end) { + dprintf("s[%d] -> d[%d] (%d)\n", cbuf->end - n + 1, 0, n); + memcpy(c, cbuf->buf + cbuf->end - n + 1, n); + } + else { + dprintf("s[%d] -> d[%d] (%d)\n", 0, + cbuf->maxlen - cbuf->start, cbuf->end + 1); + dprintf("s[%d] -> d[%d] (%d)\n", + cbuf->maxlen - n + cbuf->end + 1, 0, n - cbuf->end - 1); + + memcpy(c + cbuf->maxlen - cbuf->start, + cbuf->buf, cbuf->end + 1); + memcpy(c, cbuf->buf + cbuf->maxlen - n + cbuf->end +1, + n - cbuf->end - 1); + } + return n; +} + +/* get head or get tail */ + +char +cirbuf_get_head(struct cirbuf * cbuf) +{ + return cbuf->buf[cbuf->start]; +} + +/* get head or get tail */ + +char +cirbuf_get_tail(struct cirbuf * cbuf) +{ + return cbuf->buf[cbuf->end]; +} + diff --git a/lib/librte_cmdline/cmdline_cirbuf.h b/lib/librte_cmdline/cmdline_cirbuf.h new file mode 100644 index 0000000000..f934292e3f --- /dev/null +++ b/lib/librte_cmdline/cmdline_cirbuf.h @@ -0,0 +1,248 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CIRBUF_H_ +#define _CIRBUF_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * This structure is the header of a cirbuf type. + */ +struct cirbuf { + unsigned int maxlen; /**< total len of the fifo (number of elements) */ + unsigned int start; /**< indice of the first elt */ + unsigned int end; /**< indice of the last elt */ + unsigned int len; /**< current len of fifo */ + char *buf; +}; + +/* #define CIRBUF_DEBUG */ + +#ifdef CIRBUF_DEBUG +#define dprintf(fmt, ...) printf("line %3.3d - " fmt, __LINE__, ##__VA_ARGS__) +#else +#define dprintf(args...) do {} while(0) +#endif + + +/** + * Init the circular buffer + */ +void cirbuf_init(struct cirbuf *cbuf, char *buf, unsigned int start, unsigned int maxlen); + + +/** + * Return 1 if the circular buffer is full + */ +#define CIRBUF_IS_FULL(cirbuf) ((cirbuf)->maxlen == (cirbuf)->len) + +/** + * Return 1 if the circular buffer is empty + */ +#define CIRBUF_IS_EMPTY(cirbuf) ((cirbuf)->len == 0) + +/** + * return current size of the circular buffer (number of used elements) + */ +#define CIRBUF_GET_LEN(cirbuf) ((cirbuf)->len) + +/** + * return size of the circular buffer (used + free elements) + */ +#define CIRBUF_GET_MAXLEN(cirbuf) ((cirbuf)->maxlen) + +/** + * return the number of free elts + */ +#define CIRBUF_GET_FREELEN(cirbuf) ((cirbuf)->maxlen - (cirbuf)->len) + +/** + * Iterator for a circular buffer + * c: struct cirbuf pointer + * i: an integer type internally used in the macro + * e: char that takes the value for each iteration + */ +#define CIRBUF_FOREACH(c, i, e) \ + for ( i=0, e=(c)->buf[(c)->start] ; \ + i<((c)->len) ; \ + i ++, e=(c)->buf[((c)->start+i)%((c)->maxlen)]) + + +/** + * Add a character at head of the circular buffer. Return 0 on success, or + * a negative value on error. + */ +int cirbuf_add_head_safe(struct cirbuf *cbuf, char c); + +/** + * Add a character at head of the circular buffer. You _must_ check that you + * have enough free space in the buffer before calling this func. + */ +void cirbuf_add_head(struct cirbuf *cbuf, char c); + +/** + * Add a character at tail of the circular buffer. Return 0 on success, or + * a negative value on error. + */ +int cirbuf_add_tail_safe(struct cirbuf *cbuf, char c); + +/** + * Add a character at tail of the circular buffer. You _must_ check that you + * have enough free space in the buffer before calling this func. + */ +void cirbuf_add_tail(struct cirbuf *cbuf, char c); + +/** + * Remove a char at the head of the circular buffer. Return 0 on + * success, or a negative value on error. + */ +int cirbuf_del_head_safe(struct cirbuf *cbuf); + +/** + * Remove a char at the head of the circular buffer. You _must_ check + * that buffer is not empty before calling the function. 
+ */ +void cirbuf_del_head(struct cirbuf *cbuf); + +/** + * Remove a char at the tail of the circular buffer. Return 0 on + * success, or a negative value on error. + */ +int cirbuf_del_tail_safe(struct cirbuf *cbuf); + +/** + * Remove a char at the tail of the circular buffer. You _must_ check + * that buffer is not empty before calling the function. + */ +void cirbuf_del_tail(struct cirbuf *cbuf); + +/** + * Return the head of the circular buffer. You _must_ check that + * buffer is not empty before calling the function. + */ +char cirbuf_get_head(struct cirbuf *cbuf); + +/** + * Return the tail of the circular buffer. You _must_ check that + * buffer is not empty before calling the function. + */ +char cirbuf_get_tail(struct cirbuf *cbuf); + +/** + * Add a buffer at head of the circular buffer. 'c' is a pointer to a + * buffer, and n is the number of char to add. Return the number of + * copied bytes on success, or a negative value on error. + */ +int cirbuf_add_buf_head(struct cirbuf *cbuf, const char *c, unsigned int n); + +/** + * Add a buffer at tail of the circular buffer. 'c' is a pointer to a + * buffer, and n is the number of char to add. Return the number of + * copied bytes on success, or a negative value on error. + */ +int cirbuf_add_buf_tail(struct cirbuf *cbuf, const char *c, unsigned int n); + +/** + * Remove chars at the head of the circular buffer. Return 0 on + * success, or a negative value on error. + */ +int cirbuf_del_buf_head(struct cirbuf *cbuf, unsigned int size); + +/** + * Remove chars at the tail of the circular buffer. Return 0 on + * success, or a negative value on error. + */ +int cirbuf_del_buf_tail(struct cirbuf *cbuf, unsigned int size); + +/** + * Copy a maximum of 'size' characters from the head of the circular + * buffer to a flat one pointed by 'c'. Return the number of copied + * chars. + */ +int cirbuf_get_buf_head(struct cirbuf *cbuf, char *c, unsigned int size); + +/** + * Copy a maximum of 'size' characters from the tail of the circular + * buffer to a flat one pointed by 'c'. Return the number of copied + * chars. + */ +int cirbuf_get_buf_tail(struct cirbuf *cbuf, char *c, unsigned int size); + + +/** + * Set the start of the data to the index 0 of the internal buffer. + */ +void cirbuf_align_left(struct cirbuf *cbuf); + +/** + * Set the end of the data to the last index of the internal buffer. + */ +void cirbuf_align_right(struct cirbuf *cbuf); + +#ifdef __cplusplus +} +#endif + +#endif /* _CIRBUF_H_ */ diff --git a/lib/librte_cmdline/cmdline_parse.c b/lib/librte_cmdline/cmdline_parse.c new file mode 100644 index 0000000000..f9d46ca40f --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse.c @@ -0,0 +1,544 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
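A minimal usage sketch of the circular-buffer API declared in this header (illustrative only: the demo function, buffer size and contents below are not part of the patch):

#include <stdio.h>
#include "cmdline_cirbuf.h"

static void
cirbuf_demo(void)
{
	struct cirbuf cb;
	char storage[8];		/* backing array owned by the caller */
	char flat[sizeof(storage)];
	const char *msg = "hello";
	unsigned int i;
	char e;
	int n;

	/* start index 0, capacity = sizeof(storage) elements */
	cirbuf_init(&cb, storage, 0, sizeof(storage));

	/* append one char at a time; the _safe variant refuses to overflow */
	for (i = 0; msg[i] != '\0'; i++)
		if (cirbuf_add_tail_safe(&cb, msg[i]) < 0)
			break;		/* buffer full */

	/* iterate over the stored chars without consuming them */
	CIRBUF_FOREACH(&cb, i, e)
		printf("%c", e);
	printf(" (len=%u, free=%u)\n",
	       CIRBUF_GET_LEN(&cb), CIRBUF_GET_FREELEN(&cb));

	/* copy to a flat buffer, then drop the copied chars from the head */
	n = cirbuf_get_buf_head(&cb, flat, sizeof(flat));
	if (n > 0)
		cirbuf_del_buf_head(&cb, (unsigned int)n);
}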
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "cmdline_rdline.h" +#include "cmdline_parse.h" +#include "cmdline.h" + +#ifdef RTE_LIBRTE_CMDLINE_DEBUG +#define debug_printf printf +#else +#define debug_printf(args...) do {} while(0) +#endif + +#define CMDLINE_BUFFER_SIZE 64 + +/* isblank() needs _XOPEN_SOURCE >= 600 || _ISOC99_SOURCE, so use our + * own. */ +static int +isblank2(char c) +{ + if (c == ' ' || + c == '\t' ) + return 1; + return 0; +} + +static int +isendofline(char c) +{ + if (c == '\n' || + c == '\r' ) + return 1; + return 0; +} + +static int +iscomment(char c) +{ + if (c == '#') + return 1; + return 0; +} + +int +cmdline_isendoftoken(char c) +{ + if (!c || iscomment(c) || isblank2(c) || isendofline(c)) + return 1; + return 0; +} + +static unsigned int +nb_common_chars(const char * s1, const char * s2) +{ + unsigned int i=0; + + while (*s1==*s2 && *s1 && *s2) { + s1++; + s2++; + i++; + } + return i; +} + +/** + * try to match the buffer with an instruction (only the first + * nb_match_token tokens if != 0). 
Return 0 if we match all the + * tokens, else the number of matched tokens, else -1. + */ +static int +match_inst(cmdline_parse_inst_t *inst, const char *buf, + unsigned int nb_match_token, void * result_buf) +{ + unsigned int token_num=0; + cmdline_parse_token_hdr_t * token_p; + unsigned int i=0; + int n = 0; + struct cmdline_token_hdr token_hdr; + + token_p = inst->tokens[token_num]; + if (token_p) + memcpy(&token_hdr, token_p, sizeof(token_hdr)); + + /* check if we match all tokens of inst */ + while (token_p && (!nb_match_token || iparse(token_p, buf, + (char *)result_buf + + token_hdr.offset); + else + n = token_hdr.ops->parse(token_p, buf, NULL); + + if (n < 0) + break; + + debug_printf("TK parsed (len=%d)\n", n); + i++; + buf += n; + + token_num ++; + token_p = inst->tokens[token_num]; + if (token_p) + memcpy(&token_hdr, token_p, sizeof(token_hdr)); + } + + /* does not match */ + if (i==0) + return -1; + + /* in case we want to match a specific num of token */ + if (nb_match_token) { + if (i == nb_match_token) { + return 0; + } + return i; + } + + /* we don't match all the tokens */ + if (token_p) { + return i; + } + + /* are there are some tokens more */ + while (isblank2(*buf)) { + buf++; + } + + /* end of buf */ + if ( isendofline(*buf) || iscomment(*buf) ) + return 0; + + /* garbage after inst */ + return i; +} + + +int +cmdline_parse(struct cmdline *cl, const char * buf) +{ + unsigned int inst_num=0; + cmdline_parse_inst_t *inst; + const char *curbuf; + char result_buf[BUFSIZ]; + void (*f)(void *, struct cmdline *, void *) = NULL; + void *data = NULL; + int comment = 0; + int linelen = 0; + int parse_it = 0; + int err = CMDLINE_PARSE_NOMATCH; + int tok; + cmdline_parse_ctx_t *ctx = cl->ctx; +#ifdef RTE_LIBRTE_CMDLINE_DEBUG + char debug_buf[BUFSIZ]; +#endif + + /* + * - look if the buffer contains at least one line + * - look if line contains only spaces or comments + * - count line length + */ + curbuf = buf; + while (! isendofline(*curbuf)) { + if ( *curbuf == '\0' ) { + debug_printf("Incomplete buf (len=%d)\n", linelen); + return 0; + } + if ( iscomment(*curbuf) ) { + comment = 1; + } + if ( ! isblank2(*curbuf) && ! comment) { + parse_it = 1; + } + curbuf++; + linelen++; + } + + /* skip all endofline chars */ + while (isendofline(buf[linelen])) { + linelen++; + } + + /* empty line */ + if ( parse_it == 0 ) { + debug_printf("Empty line (len=%d)\n", linelen); + return linelen; + } + +#ifdef RTE_LIBRTE_CMDLINE_DEBUG + rte_snprintf(debug_buf, (linelen>64 ? 64 : linelen), "%s", buf); + debug_printf("Parse line : len=%d, <%s>\n", linelen, debug_buf); +#endif + + /* parse it !! 
*/ + inst = ctx[inst_num]; + while (inst) { + debug_printf("INST %d\n", inst_num); + + /* fully parsed */ + tok = match_inst(inst, buf, 0, result_buf); + + if (tok > 0) /* we matched at least one token */ + err = CMDLINE_PARSE_BAD_ARGS; + + else if (!tok) { + debug_printf("INST fully parsed\n"); + /* skip spaces */ + while (isblank2(*curbuf)) { + curbuf++; + } + + /* if end of buf -> there is no garbage after inst */ + if (isendofline(*curbuf) || iscomment(*curbuf)) { + if (!f) { + memcpy(&f, &inst->f, sizeof(f)); + memcpy(&data, &inst->data, sizeof(data)); + } + else { + /* more than 1 inst matches */ + err = CMDLINE_PARSE_AMBIGUOUS; + f=NULL; + debug_printf("Ambiguous cmd\n"); + break; + } + } + } + + inst_num ++; + inst = ctx[inst_num]; + } + + /* call func */ + if (f) { + f(result_buf, cl, data); + } + + /* no match */ + else { + debug_printf("No match err=%d\n", err); + return err; + } + + return linelen; +} + +int +cmdline_complete(struct cmdline *cl, const char *buf, int *state, + char *dst, unsigned int size) +{ + const char *partial_tok = buf; + unsigned int inst_num = 0; + cmdline_parse_inst_t *inst; + cmdline_parse_token_hdr_t *token_p; + struct cmdline_token_hdr token_hdr; + char tmpbuf[CMDLINE_BUFFER_SIZE], comp_buf[CMDLINE_BUFFER_SIZE]; + unsigned int partial_tok_len; + int comp_len = -1; + int tmp_len = -1; + int nb_token = 0; + unsigned int i, n; + int l; + unsigned int nb_completable; + unsigned int nb_non_completable; + int local_state = 0; + const char *help_str; + cmdline_parse_ctx_t *ctx = cl->ctx; + + debug_printf("%s called\n", __func__); + memset(&token_hdr, 0, sizeof(token_hdr)); + + /* count the number of complete token to parse */ + for (i=0 ; buf[i] ; i++) { + if (!isblank2(buf[i]) && isblank2(buf[i+1])) + nb_token++; + if (isblank2(buf[i]) && !isblank2(buf[i+1])) + partial_tok = buf+i+1; + } + partial_tok_len = strnlen(partial_tok, RDLINE_BUF_SIZE); + + /* first call -> do a first pass */ + if (*state <= 0) { + debug_printf("try complete <%s>\n", buf); + debug_printf("there is %d complete tokens, <%s> is incomplete\n", + nb_token, partial_tok); + + nb_completable = 0; + nb_non_completable = 0; + + inst = ctx[inst_num]; + while (inst) { + /* parse the first tokens of the inst */ + if (nb_token && match_inst(inst, buf, nb_token, NULL)) + goto next; + + debug_printf("instruction match \n"); + token_p = inst->tokens[nb_token]; + if (token_p) + memcpy(&token_hdr, token_p, sizeof(token_hdr)); + + /* non completable */ + if (!token_p || + !token_hdr.ops->complete_get_nb || + !token_hdr.ops->complete_get_elt || + (n = token_hdr.ops->complete_get_nb(token_p)) == 0) { + nb_non_completable++; + goto next; + } + + debug_printf("%d choices for this token\n", n); + for (i=0 ; icomplete_get_elt(token_p, i, + tmpbuf, + sizeof(tmpbuf)) < 0) + continue; + + /* we have at least room for one char */ + tmp_len = strnlen(tmpbuf, sizeof(tmpbuf)); + if (tmp_len < CMDLINE_BUFFER_SIZE - 1) { + tmpbuf[tmp_len] = ' '; + tmpbuf[tmp_len+1] = 0; + } + + debug_printf(" choice <%s>\n", tmpbuf); + + /* does the completion match the + * beginning of the word ? 
*/ + if (!strncmp(partial_tok, tmpbuf, + partial_tok_len)) { + if (comp_len == -1) { + rte_snprintf(comp_buf, sizeof(comp_buf), + "%s", tmpbuf + partial_tok_len); + comp_len = + strnlen(tmpbuf + partial_tok_len, + sizeof(tmpbuf) - partial_tok_len); + + } + else { + comp_len = + nb_common_chars(comp_buf, + tmpbuf+partial_tok_len); + comp_buf[comp_len] = 0; + } + nb_completable++; + } + } + next: + inst_num ++; + inst = ctx[inst_num]; + } + + debug_printf("total choices %d for this completion\n", + nb_completable); + + /* no possible completion */ + if (nb_completable == 0 && nb_non_completable == 0) + return 0; + + /* if multichoice is not required */ + if (*state == 0 && partial_tok_len > 0) { + /* one or several choices starting with the + same chars */ + if (comp_len > 0) { + if ((unsigned)(comp_len + 1) > size) + return 0; + + rte_snprintf(dst, size, "%s", comp_buf); + dst[comp_len] = 0; + return 2; + } + } + } + + /* init state correctly */ + if (*state == -1) + *state = 0; + + debug_printf("Multiple choice STATE=%d\n", *state); + + inst_num = 0; + inst = ctx[inst_num]; + while (inst) { + /* we need to redo it */ + inst = ctx[inst_num]; + + if (nb_token && match_inst(inst, buf, nb_token, NULL)) + goto next2; + + token_p = inst->tokens[nb_token]; + if (token_p) + memcpy(&token_hdr, token_p, sizeof(token_hdr)); + + /* one choice for this token */ + if (!token_p || + !token_hdr.ops->complete_get_nb || + !token_hdr.ops->complete_get_elt || + (n = token_hdr.ops->complete_get_nb(token_p)) == 0) { + if (local_state < *state) { + local_state++; + goto next2; + } + (*state)++; + if (token_p && token_hdr.ops->get_help) { + token_hdr.ops->get_help(token_p, tmpbuf, + sizeof(tmpbuf)); + help_str = inst->help_str; + if (help_str) + rte_snprintf(dst, size, "[%s]: %s", tmpbuf, + help_str); + else + rte_snprintf(dst, size, "[%s]: No help", + tmpbuf); + } + else { + rte_snprintf(dst, size, "[RETURN]"); + } + return 1; + } + + /* several choices */ + for (i=0 ; icomplete_get_elt(token_p, i, tmpbuf, + sizeof(tmpbuf)) < 0) + continue; + /* we have at least room for one char */ + tmp_len = strnlen(tmpbuf, sizeof(tmpbuf)); + if (tmp_len < CMDLINE_BUFFER_SIZE - 1) { + tmpbuf[tmp_len] = ' '; + tmpbuf[tmp_len + 1] = 0; + } + + debug_printf(" choice <%s>\n", tmpbuf); + + /* does the completion match the beginning of + * the word ? */ + if (!strncmp(partial_tok, tmpbuf, + partial_tok_len)) { + if (local_state < *state) { + local_state++; + continue; + } + (*state)++; + l=rte_snprintf(dst, size, "%s", tmpbuf); + if (l>=0 && token_hdr.ops->get_help) { + token_hdr.ops->get_help(token_p, tmpbuf, + sizeof(tmpbuf)); + help_str = inst->help_str; + if (help_str) + rte_snprintf(dst+l, size-l, "[%s]: %s", + tmpbuf, help_str); + else + rte_snprintf(dst+l, size-l, + "[%s]: No help", tmpbuf); + } + + return 1; + } + } + next2: + inst_num ++; + inst = ctx[inst_num]; + } + return 0; +} + diff --git a/lib/librte_cmdline/cmdline_parse.h b/lib/librte_cmdline/cmdline_parse.h new file mode 100644 index 0000000000..eb9a03721c --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse.h @@ -0,0 +1,188 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
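Seen from the caller's side, the contract of cmdline_parse() above boils down to the following sketch (the command text is illustrative; any cmdline created against a real context can be passed in):

#include <stdio.h>
#include "cmdline_parse.h"

static void
parse_one_line(struct cmdline *cl)
{
	/* the buffer handed to cmdline_parse() must end with "\n\0" */
	int ret = cmdline_parse(cl, "add 192.168.0.1\n");

	if (ret == CMDLINE_PARSE_AMBIGUOUS)
		printf("several instructions match this line\n");
	else if (ret == CMDLINE_PARSE_NOMATCH)
		printf("no instruction matches this line\n");
	else if (ret == CMDLINE_PARSE_BAD_ARGS)
		printf("instruction recognised, arguments invalid\n");
	else if (ret == 0)
		printf("line is incomplete (no end-of-line yet)\n");
	else
		/* > 0: length of the consumed line; for a matching command
		 * the callback has been invoked (blank or comment-only lines
		 * are skipped and also return their length) */
		printf("consumed %d chars\n", ret);
}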
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CMDLINE_PARSE_H_ +#define _CMDLINE_PARSE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef offsetof +#define offsetof(type, field) ((size_t) &( ((type *)0)->field) ) +#endif + +/* return status for parsing */ +#define CMDLINE_PARSE_SUCCESS 0 +#define CMDLINE_PARSE_AMBIGUOUS -1 +#define CMDLINE_PARSE_NOMATCH -2 +#define CMDLINE_PARSE_BAD_ARGS -3 + +/* return status for completion */ +#define CMDLINE_PARSE_COMPLETE_FINISHED 0 +#define CMDLINE_PARSE_COMPLETE_AGAIN 1 +#define CMDLINE_PARSE_COMPLETED_BUFFER 2 + +/** + * Stores a pointer to the ops struct, and the offset: the place to + * write the parsed result in the destination structure. 
+ */ +struct cmdline_token_hdr { + struct cmdline_token_ops *ops; + unsigned int offset; +}; +typedef struct cmdline_token_hdr cmdline_parse_token_hdr_t; + +/** + * A token is defined by this structure. + * + * parse() takes the token as first argument, then the source buffer + * starting at the token we want to parse. The 3rd arg is a pointer + * where we store the parsed data (as binary). It returns the number of + * parsed chars on success and a negative value on error. + * + * complete_get_nb() returns the number of possible values for this + * token if completion is possible. If it is NULL or if it returns 0, + * no completion is possible. + * + * complete_get_elt() copy in dstbuf (the size is specified in the + * parameter) the i-th possible completion for this token. returns 0 + * on success or and a negative value on error. + * + * get_help() fills the dstbuf with the help for the token. It returns + * -1 on error and 0 on success. + */ +struct cmdline_token_ops { + /** parse(token ptr, buf, res pts) */ + int (*parse)(cmdline_parse_token_hdr_t *, const char *, void *); + /** return the num of possible choices for this token */ + int (*complete_get_nb)(cmdline_parse_token_hdr_t *); + /** return the elt x for this token (token, idx, dstbuf, size) */ + int (*complete_get_elt)(cmdline_parse_token_hdr_t *, int, char *, unsigned int); + /** get help for this token (token, dstbuf, size) */ + int (*get_help)(cmdline_parse_token_hdr_t *, char *, unsigned int); +}; + +struct cmdline; +/** + * Store a instruction, which is a pointer to a callback function and + * its parameter that is called when the instruction is parsed, a help + * string, and a list of token composing this instruction. + */ +struct cmdline_inst { + /* f(parsed_struct, data) */ + void (*f)(void *, struct cmdline *, void *); + void *data; + const char *help_str; + cmdline_parse_token_hdr_t *tokens[]; +}; +typedef struct cmdline_inst cmdline_parse_inst_t; + +/** + * A context is identified by its name, and contains a list of + * instruction + * + */ +typedef cmdline_parse_inst_t *cmdline_parse_ctx_t; + +/** + * Try to parse a buffer according to the specified context. The + * argument buf must ends with "\n\0". The function returns + * CMDLINE_PARSE_AMBIGUOUS, CMDLINE_PARSE_NOMATCH or + * CMDLINE_PARSE_BAD_ARGS on error. Else it calls the associated + * function (defined in the context) and returns 0 + * (CMDLINE_PARSE_SUCCESS). + */ +int cmdline_parse(struct cmdline *cl, const char *buf); + +/** + * complete() must be called with *state==0 (try to complete) or + * with *state==-1 (just display choices), then called without + * modifying *state until it returns CMDLINE_PARSE_COMPLETED_BUFFER or + * CMDLINE_PARSE_COMPLETED_BUFFER. + * + * It returns < 0 on error. + * + * Else it returns: + * - CMDLINE_PARSE_COMPLETED_BUFFER on completion (one possible + * choice). In this case, the chars are appended in dst buffer. + * - CMDLINE_PARSE_COMPLETE_AGAIN if there is several possible + * choices. In this case, you must call the function again, + * keeping the value of state intact. + * - CMDLINE_PARSE_COMPLETED_BUFFER when the iteration is + * finished. The dst is not valid for this last call. + * + * The returned dst buf ends with \0. 
+ */ +int cmdline_complete(struct cmdline *cl, const char *buf, int *state, + char *dst, unsigned int size); + + +/* return true if(!c || iscomment(c) || isblank(c) || + * isendofline(c)) */ +int cmdline_isendoftoken(char c); + +#ifdef __cplusplus +} +#endif + +#endif /* _CMDLINE_PARSE_H_ */ diff --git a/lib/librte_cmdline/cmdline_parse_etheraddr.c b/lib/librte_cmdline/cmdline_parse_etheraddr.c new file mode 100644 index 0000000000..5700a74ddd --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_etheraddr.c @@ -0,0 +1,172 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
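The *state protocol documented above is easiest to see in a small driver loop (illustrative; the destination buffer size and the printing are arbitrary):

#include <stdio.h>
#include "cmdline_parse.h"

static void
show_completions(struct cmdline *cl, const char *line)
{
	char dst[128];
	int state = -1;	/* -1: list the choices; 0: try to complete in place */
	int ret;

	for (;;) {
		ret = cmdline_complete(cl, line, &state, dst, sizeof(dst));
		if (ret <= 0)
			break;	/* CMDLINE_PARSE_COMPLETE_FINISHED or error */
		if (ret == CMDLINE_PARSE_COMPLETED_BUFFER) {
			/* single match: dst holds the missing suffix */
			printf("-> %s\n", dst);
			break;
		}
		/* CMDLINE_PARSE_COMPLETE_AGAIN: one candidate per call;
		 * keep 'state' untouched and call again */
		printf("   %s\n", dst);
	}
}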
IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "cmdline_parse.h" +#include "cmdline_parse_etheraddr.h" + +struct cmdline_token_ops cmdline_token_etheraddr_ops = { + .parse = cmdline_parse_etheraddr, + .complete_get_nb = NULL, + .complete_get_elt = NULL, + .get_help = cmdline_get_help_etheraddr, +}; + + +#define ETHER_ADDRSTRLEN 18 + +#ifdef __linux__ +#define ea_oct ether_addr_octet +#else +#define ea_oct octet +#endif + + +static struct ether_addr * +my_ether_aton(const char *a) +{ + int i; + char *end; + unsigned long o[ETHER_ADDR_LEN]; + static struct ether_addr ether_addr; + + i = 0; + do { + errno = 0; + o[i] = strtoul(a, &end, 16); + if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0)) + return (NULL); + a = end + 1; + } while (++i != sizeof (o) / sizeof (o[0]) && end[0] != 0); + + /* Junk at the end of line */ + if (end[0] != 0) + return (NULL); + + /* Support the format XX:XX:XX:XX:XX:XX */ + if (i == ETHER_ADDR_LEN) { + while (i-- != 0) { + if (o[i] > UINT8_MAX) + return (NULL); + ether_addr.ea_oct[i] = (uint8_t)o[i]; + } + /* Support the format XXXX:XXXX:XXXX */ + } else if (i == ETHER_ADDR_LEN / 2) { + while (i-- != 0) { + if (o[i] > UINT16_MAX) + return (NULL); + ether_addr.ea_oct[i * 2] = (uint8_t)(o[i] >> 8); + ether_addr.ea_oct[i * 2 + 1] = (uint8_t)(o[i] & 0xff); + } + /* unknown format */ + } else + return (NULL); + + return (struct ether_addr *)ðer_addr; +} + +int +cmdline_parse_etheraddr(__attribute__((unused)) cmdline_parse_token_hdr_t *tk, + const char *buf, void *res) +{ + unsigned int token_len = 0; + char ether_str[ETHER_ADDRSTRLEN+1]; + struct ether_addr *tmp; + + if (! *buf) + return -1; + + while (!cmdline_isendoftoken(buf[token_len])) + token_len++; + + /* if token is too big... */ + if (token_len >= ETHER_ADDRSTRLEN) + return -1; + + rte_snprintf(ether_str, token_len+1, "%s", buf); + + tmp = my_ether_aton(ether_str); + if (tmp == NULL) + return -1; + + memcpy(res, tmp, sizeof(struct ether_addr)); + return token_len; +} + +int cmdline_get_help_etheraddr(__attribute__((unused)) cmdline_parse_token_hdr_t *tk, + char *dstbuf, unsigned int size) +{ + rte_snprintf(dstbuf, size, "Ethernet address"); + return 0; +} diff --git a/lib/librte_cmdline/cmdline_parse_etheraddr.h b/lib/librte_cmdline/cmdline_parse_etheraddr.h new file mode 100644 index 0000000000..d6d30b6a59 --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_etheraddr.h @@ -0,0 +1,102 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
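Called outside of a command context, the parser above acts as a plain string-to-binary converter; a sketch of a direct call (the address literal and the trailing word are arbitrary):

#include <stdint.h>
#include <net/ethernet.h>	/* struct ether_addr on Linux */
#include "cmdline_parse.h"
#include "cmdline_parse_etheraddr.h"

static int
parse_mac_example(void)
{
	struct ether_addr mac;
	/* the token parser stops at the first blank, comment or end of line;
	 * the token argument is unused, so NULL is acceptable here */
	int len = cmdline_parse_etheraddr(NULL, "00:1b:21:ab:cd:ef up", &mac);

	if (len < 0)
		return -1;	/* not a valid Ethernet address */
	/* len == 17: number of chars consumed ("00:1b:21:ab:cd:ef") */
	return len;
}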
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _PARSE_ETHERADDR_H_ +#define _PARSE_ETHERADDR_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +struct cmdline_token_etheraddr_data { + uint8_t flags; +}; + +struct cmdline_token_etheraddr { + struct cmdline_token_hdr hdr; +}; +typedef struct cmdline_token_etheraddr cmdline_parse_token_etheraddr_t; + +extern struct cmdline_token_ops cmdline_token_etheraddr_ops; + +int cmdline_parse_etheraddr(cmdline_parse_token_hdr_t *tk, const char *srcbuf, + void *res); +int cmdline_get_help_etheraddr(cmdline_parse_token_hdr_t *tk, char *dstbuf, + unsigned int size); + +/* + * Warning! Not compatible with C++! 
+ */ +#define TOKEN_ETHERADDR_INITIALIZER(structure, field) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_etheraddr_ops, \ + .offset = offsetof(structure, field), \ + }, \ +} + +#ifdef __cplusplus +} +#endif + + +#endif /* _PARSE_ETHERADDR_H_ */ diff --git a/lib/librte_cmdline/cmdline_parse_ipaddr.c b/lib/librte_cmdline/cmdline_parse_ipaddr.c new file mode 100644 index 0000000000..66a1493527 --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_ipaddr.c @@ -0,0 +1,383 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
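The initializer above is meant to bind a token to a field of a per-command result structure; a small sketch (the structure and field names are illustrative):

#include <stdint.h>
#include <net/ethernet.h>	/* struct ether_addr on Linux */
#include "cmdline_parse.h"
#include "cmdline_parse_etheraddr.h"

/* result structure filled in by the parser when the command matches */
struct cmd_set_mac_result {
	struct ether_addr mac;
};

/* token bound to the 'mac' field through offsetof() */
static cmdline_parse_token_etheraddr_t cmd_set_mac_tok =
	TOKEN_ETHERADDR_INITIALIZER(struct cmd_set_mac_result, mac);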
IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * For inet_ntop() functions: + * + * Copyright (c) 1996 by Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS + * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE + * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS + * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS + * SOFTWARE. + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef __linux__ +#include +#endif + +#include + +#include "cmdline_parse.h" +#include "cmdline_parse_ipaddr.h" + +struct cmdline_token_ops cmdline_token_ipaddr_ops = { + .parse = cmdline_parse_ipaddr, + .complete_get_nb = NULL, + .complete_get_elt = NULL, + .get_help = cmdline_get_help_ipaddr, +}; + +#define INADDRSZ 4 +#define IN6ADDRSZ 16 + +/* + * WARNING: Don't even consider trying to compile this on a system where + * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. + */ + +static int inet_pton4(const char *src, unsigned char *dst); +static int inet_pton6(const char *src, unsigned char *dst); + +/* int + * inet_pton(af, src, dst) + * convert from presentation format (which usually means ASCII printable) + * to network format (which is usually some kind of binary format). + * return: + * 1 if the address was valid for the specified address family + * 0 if the address wasn't valid (`dst' is untouched in this case) + * -1 if some other error occurred (`dst' is untouched in this case, too) + * author: + * Paul Vixie, 1996. + */ +static int +my_inet_pton(int af, const char *src, void *dst) +{ + switch (af) { + case AF_INET: + return (inet_pton4(src, dst)); + case AF_INET6: + return (inet_pton6(src, dst)); + default: + errno = EAFNOSUPPORT; + return (-1); + } + /* NOTREACHED */ +} + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. + * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(const char *src, unsigned char *dst) +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + if ((pch = strchr(digits, ch)) != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return (0); + if (! 
saw_digit) { + if (++octets > 4) + return (0); + saw_digit = 1; + } + *tp = (unsigned char)new; + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return (0); + *++tp = 0; + saw_digit = 0; + } else + return (0); + } + if (octets < 4) + return (0); + + memcpy(dst, tmp, INADDRSZ); + return (1); +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton6(const char *src, unsigned char *dst) +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[IN6ADDRSZ], *tp, *endp, *colonp; + const char *xdigits, *curtok; + int ch, saw_xdigit, count_xdigit; + unsigned int val; + + memset((tp = tmp), '\0', IN6ADDRSZ); + endp = tp + IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. */ + if (*src == ':') + if (*++src != ':') + return (0); + curtok = src; + saw_xdigit = count_xdigit = 0; + val = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + if (count_xdigit >= 4) + return (0); + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return (0); + saw_xdigit = 1; + count_xdigit++; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return (0); + colonp = tp; + continue; + } else if (*src == '\0') { + return (0); + } + if (tp + sizeof(int16_t) > endp) + return (0); + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + saw_xdigit = 0; + count_xdigit = 0; + val = 0; + continue; + } + if (ch == '.' && ((tp + INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += INADDRSZ; + saw_xdigit = 0; + count_xdigit = 0; + break; /* '\0' was seen by inet_pton4(). */ + } + return (0); + } + if (saw_xdigit) { + if (tp + sizeof(int16_t) > endp) + return (0); + *tp++ = (unsigned char) ((val >> 8) & 0xff); + *tp++ = (unsigned char) (val & 0xff); + } + if (colonp != NULL) { + /* + * Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. + */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[- i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return (0); + memcpy(dst, tmp, IN6ADDRSZ); + return (1); +} + +int +cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *buf, void *res) +{ + struct cmdline_token_ipaddr *tk2 = (struct cmdline_token_ipaddr *)tk; + unsigned int token_len = 0; + char ip_str[INET6_ADDRSTRLEN+4+1]; /* '+4' is for prefixlen (if any) */ + cmdline_ipaddr_t ipaddr; + char *prefix, *prefix_end; + long prefixlen; + + if (! *buf) + return -1; + + while (!cmdline_isendoftoken(buf[token_len])) + token_len++; + + /* if token is too big... 
*/ + if (token_len >= INET6_ADDRSTRLEN+4) + return -1; + + rte_snprintf(ip_str, token_len+1, "%s", buf); + + /* convert the network prefix */ + if (tk2->ipaddr_data.flags & CMDLINE_IPADDR_NETWORK) { + prefix = strrchr(ip_str, '/'); + if (prefix == NULL) + return -1; + *prefix = '\0'; + prefix ++; + errno = 0; + prefixlen = strtol(prefix, &prefix_end, 10); + if (errno || (*prefix_end != '\0') ) + return -1; + ipaddr.prefixlen = prefixlen; + } + else { + ipaddr.prefixlen = 0; + } + + /* convert the IP addr */ + if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V4) && + my_inet_pton(AF_INET, ip_str, &ipaddr.addr.ipv4) == 1) { + ipaddr.family = AF_INET; + if (res != NULL) + memcpy(res, &ipaddr, sizeof(ipaddr)); + return token_len; + } + if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V6) && + my_inet_pton(AF_INET6, ip_str, &ipaddr.addr.ipv6) == 1) { + ipaddr.family = AF_INET6; + if (res != NULL) + memcpy(res, &ipaddr, sizeof(ipaddr)); + return token_len; + } + return -1; + +} + +int cmdline_get_help_ipaddr(cmdline_parse_token_hdr_t *tk, char *dstbuf, + unsigned int size) +{ + struct cmdline_token_ipaddr *tk2 = (struct cmdline_token_ipaddr *)tk; + + switch (tk2->ipaddr_data.flags) { + case CMDLINE_IPADDR_V4: + rte_snprintf(dstbuf, size, "IPv4"); + break; + case CMDLINE_IPADDR_V6: + rte_snprintf(dstbuf, size, "IPv6"); + break; + case CMDLINE_IPADDR_V4|CMDLINE_IPADDR_V6: + rte_snprintf(dstbuf, size, "IPv4/IPv6"); + break; + case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V4: + rte_snprintf(dstbuf, size, "IPv4 network"); + break; + case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V6: + rte_snprintf(dstbuf, size, "IPv6 network"); + break; + case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V4|CMDLINE_IPADDR_V6: + rte_snprintf(dstbuf, size, "IPv4/IPv6 network"); + break; + default: + rte_snprintf(dstbuf, size, "IPaddr (bad flags)"); + break; + } + return 0; +} diff --git a/lib/librte_cmdline/cmdline_parse_ipaddr.h b/lib/librte_cmdline/cmdline_parse_ipaddr.h new file mode 100644 index 0000000000..1a434e536b --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_ipaddr.h @@ -0,0 +1,194 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
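A sketch of driving this parser directly with a token configured for IPv4 networks (the token setup and input string are illustrative):

#include <stdint.h>
#include <netinet/in.h>
#include "cmdline_parse.h"
#include "cmdline_parse_ipaddr.h"

static int
parse_net_example(void)
{
	/* same flags as TOKEN_IPV4NET_INITIALIZER() would set; the offset is
	 * unused when the parser is called directly */
	cmdline_parse_token_ipaddr_t tok = {
		.hdr = {
			.ops = &cmdline_token_ipaddr_ops,
			.offset = 0,
		},
		.ipaddr_data = {
			.flags = CMDLINE_IPADDR_V4 | CMDLINE_IPADDR_NETWORK,
		},
	};
	cmdline_ipaddr_t res;
	int len = cmdline_parse_ipaddr(&tok.hdr, "192.168.1.0/24", &res);

	if (len < 0)
		return -1;
	/* res.family == AF_INET, res.addr.ipv4 holds 192.168.1.0,
	 * res.prefixlen == 24; len == 14 chars consumed */
	return len;
}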
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _PARSE_IPADDR_H_ +#define _PARSE_IPADDR_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define CMDLINE_IPADDR_V4 0x01 +#define CMDLINE_IPADDR_V6 0x02 +#define CMDLINE_IPADDR_NETWORK 0x04 + +struct cmdline_ipaddr { + uint8_t family; + union { + struct in_addr ipv4; + struct in6_addr ipv6; + } addr; + unsigned int prefixlen; /* in case of network only */ +}; +typedef struct cmdline_ipaddr cmdline_ipaddr_t; + +struct cmdline_token_ipaddr_data { + uint8_t flags; +}; + +struct cmdline_token_ipaddr { + struct cmdline_token_hdr hdr; + struct cmdline_token_ipaddr_data ipaddr_data; +}; +typedef struct cmdline_token_ipaddr cmdline_parse_token_ipaddr_t; + +extern struct cmdline_token_ops cmdline_token_ipaddr_ops; + +int cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *srcbuf, + void *res); +int cmdline_get_help_ipaddr(cmdline_parse_token_hdr_t *tk, char *dstbuf, + unsigned int size); + +/* + * Warning! Not compatible with C++! + */ +#define TOKEN_IPADDR_INITIALIZER(structure, field) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_ipaddr_ops, \ + .offset = offsetof(structure, field), \ + }, \ + .ipaddr_data = { \ + .flags = CMDLINE_IPADDR_V4 | \ + CMDLINE_IPADDR_V6, \ + }, \ +} + +/* + * Warning! Not compatible with C++! 
+ */ +#define TOKEN_IPV4_INITIALIZER(structure, field) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_ipaddr_ops, \ + .offset = offsetof(structure, field), \ + }, \ + .ipaddr_data = { \ + .flags = CMDLINE_IPADDR_V4, \ + }, \ +} + +/* + * Warning! Not compatible with C++! + */ +#define TOKEN_IPV6_INITIALIZER(structure, field) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_ipaddr_ops, \ + .offset = offsetof(structure, field), \ + }, \ + .ipaddr_data = { \ + .flags = CMDLINE_IPADDR_V6, \ + }, \ +} + +/* + * Warning! Not compatible with C++! + */ +#define TOKEN_IPNET_INITIALIZER(structure, field) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_ipaddr_ops, \ + .offset = offsetof(structure, field), \ + }, \ + .ipaddr_data = { \ + .flags = CMDLINE_IPADDR_V4 | \ + CMDLINE_IPADDR_V6 | \ + CMDLINE_IPADDR_NETWORK, \ + }, \ +} + +/* + * Warning! Not compatible with C++! + */ +#define TOKEN_IPV4NET_INITIALIZER(structure, field) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_ipaddr_ops, \ + .offset = offsetof(structure, field), \ + }, \ + .ipaddr_data = { \ + .flags = CMDLINE_IPADDR_V4 | \ + CMDLINE_IPADDR_NETWORK, \ + }, \ +} + +/* + * Warning! Not compatible with C++! + */ +#define TOKEN_IPV6NET_INITIALIZER(structure, field) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_ipaddr_ops, \ + .offset = offsetof(structure, field), \ + }, \ + .ipaddr_data = { \ + .flags = CMDLINE_IPADDR_V6 | \ + CMDLINE_IPADDR_NETWORK, \ + }, \ +} + +#ifdef __cplusplus +} +#endif + +#endif /* _PARSE_IPADDR_H_ */ diff --git a/lib/librte_cmdline/cmdline_parse_num.c b/lib/librte_cmdline/cmdline_parse_num.c new file mode 100644 index 0000000000..087cf483f0 --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_num.c @@ -0,0 +1,493 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved.
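A minimal sketch of how the IP address token above is meant to be driven, shown here for illustration only: the struct my_record, the literal "10.0.0.0/8" and the printing are invented, while cmdline_parse_ipaddr(), cmdline_ipaddr_t, the CMDLINE_IPADDR_* flags and TOKEN_IPNET_INITIALIZER come from the two files above; having the librte_cmdline headers on the include path and linking against the library is assumed.

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#include <cmdline_parse.h>
#include <cmdline_parse_ipaddr.h>

/* hypothetical result structure that would normally back a command */
struct my_record {
        cmdline_ipaddr_t addr;
};

int
main(void)
{
        /* accept IPv4 or IPv6 in network form, i.e. with a /prefix */
        cmdline_parse_token_ipaddr_t tok =
                TOKEN_IPNET_INITIALIZER(struct my_record, addr);
        struct my_record rec;
        char str[INET6_ADDRSTRLEN];
        int len;

        memset(&rec, 0, sizeof(rec));
        len = cmdline_parse_ipaddr(&tok.hdr, "10.0.0.0/8", &rec.addr);
        if (len < 0) {
                printf("parse error\n");
                return 1;
        }
        /* on success the return value is the number of characters consumed */
        inet_ntop(rec.addr.family, &rec.addr.addr, str, sizeof(str));
        printf("consumed %d chars: %s/%u\n", len, str, rec.addr.prefixlen);
        return 0;
}

Without the CMDLINE_IPADDR_NETWORK flag (e.g. TOKEN_IPADDR_INITIALIZER) the parser does not strip a "/8" suffix, so plain addresses are expected and prefixlen is left at 0.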
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cmdline_parse.h" +#include "cmdline_parse_num.h" + +#ifdef RTE_LIBRTE_CMDLINE_DEBUG +#define debug_printf(args...) printf(args) +#else +#define debug_printf(args...) do {} while(0) +#endif + +struct cmdline_token_ops cmdline_token_num_ops = { + .parse = cmdline_parse_num, + .complete_get_nb = NULL, + .complete_get_elt = NULL, + .get_help = cmdline_get_help_num, +}; + + +enum num_parse_state_t { + START, + DEC_NEG, + BIN, + HEX, + FLOAT_POS, + FLOAT_NEG, + + ERROR, + + FIRST_OK, /* not used */ + ZERO_OK, + HEX_OK, + OCTAL_OK, + BIN_OK, + DEC_NEG_OK, + DEC_POS_OK, + FLOAT_POS_OK, + FLOAT_NEG_OK +}; + +/* Keep it sync with enum in .h */ +static const char * num_help[] = { + "UINT8", "UINT16", "UINT32", "UINT64", + "INT8", "INT16", "INT32", "INT64", +#ifdef CMDLINE_HAVE_FLOAT + "FLOAT", +#endif +}; + +static inline int +add_to_res(unsigned int c, uint64_t *res, unsigned int base) +{ + /* overflow */ + if ( (UINT64_MAX - c) / base < *res ) { + return -1; + } + + *res = (uint64_t) (*res * base + c); + return 0; +} + + +/* parse an int or a float */ +int +cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res) +{ + struct cmdline_token_num_data nd; + enum num_parse_state_t st = START; + const char * buf = srcbuf; + char c = *buf; + uint64_t res1 = 0; +#ifdef CMDLINE_HAVE_FLOAT + uint64_t res2 = 0, res3 = 1; +#endif + + memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd)); + + while ( st != ERROR && c && ! 
cmdline_isendoftoken(c) ) { + debug_printf("%c %x -> ", c, c); + switch (st) { + case START: + if (c == '-') { + st = DEC_NEG; + } + else if (c == '0') { + st = ZERO_OK; + } +#ifdef CMDLINE_HAVE_FLOAT + else if (c == '.') { + st = FLOAT_POS; + res1 = 0; + } +#endif + else if (c >= '1' && c <= '9') { + if (add_to_res(c - '0', &res1, 10) < 0) + st = ERROR; + else + st = DEC_POS_OK; + } + else { + st = ERROR; + } + break; + + case ZERO_OK: + if (c == 'x') { + st = HEX; + } + else if (c == 'b') { + st = BIN; + } +#ifdef CMDLINE_HAVE_FLOAT + else if (c == '.') { + st = FLOAT_POS; + res1 = 0; + } +#endif + else if (c >= '0' && c <= '7') { + if (add_to_res(c - '0', &res1, 10) < 0) + st = ERROR; + else + st = OCTAL_OK; + } + else { + st = ERROR; + } + break; + + case DEC_NEG: + if (c >= '0' && c <= '9') { + if (add_to_res(c - '0', &res1, 10) < 0) + st = ERROR; + else + st = DEC_NEG_OK; + } +#ifdef CMDLINE_HAVE_FLOAT + else if (c == '.') { + res1 = 0; + st = FLOAT_NEG; + } +#endif + else { + st = ERROR; + } + break; + + case DEC_NEG_OK: + if (c >= '0' && c <= '9') { + if (add_to_res(c - '0', &res1, 10) < 0) + st = ERROR; + } +#ifdef CMDLINE_HAVE_FLOAT + else if (c == '.') { + st = FLOAT_NEG; + } +#endif + else { + st = ERROR; + } + break; + + case DEC_POS_OK: + if (c >= '0' && c <= '9') { + if (add_to_res(c - '0', &res1, 10) < 0) + st = ERROR; + } +#ifdef CMDLINE_HAVE_FLOAT + else if (c == '.') { + st = FLOAT_POS; + } +#endif + else { + st = ERROR; + } + break; + + case HEX: + st = HEX_OK; + /* no break */ + case HEX_OK: + if (c >= '0' && c <= '9') { + if (add_to_res(c - '0', &res1, 16) < 0) + st = ERROR; + } + else if (c >= 'a' && c <= 'f') { + if (add_to_res(c - 'a' + 10, &res1, 16) < 0) + st = ERROR; + } + else if (c >= 'A' && c <= 'F') { + if (add_to_res(c - 'A' + 10, &res1, 16) < 0) + st = ERROR; + } + else { + st = ERROR; + } + break; + + + case OCTAL_OK: + if (c >= '0' && c <= '7') { + if (add_to_res(c - '0', &res1, 8) < 0) + st = ERROR; + } + else { + st = ERROR; + } + break; + + case BIN: + st = BIN_OK; + /* no break */ + case BIN_OK: + if (c >= '0' && c <= '1') { + if (add_to_res(c - '0', &res1, 2) < 0) + st = ERROR; + } + else { + st = ERROR; + } + break; + +#ifdef CMDLINE_HAVE_FLOAT + case FLOAT_POS: + if (c >= '0' && c <= '9') { + if (add_to_res(c - '0', &res2, 10) < 0) + st = ERROR; + else + st = FLOAT_POS_OK; + res3 = 10; + } + else { + st = ERROR; + } + break; + + case FLOAT_NEG: + if (c >= '0' && c <= '9') { + if (add_to_res(c - '0', &res2, 10) < 0) + st = ERROR; + else + st = FLOAT_NEG_OK; + res3 = 10; + } + else { + st = ERROR; + } + break; + + case FLOAT_POS_OK: + if (c >= '0' && c <= '9') { + if (add_to_res(c - '0', &res2, 10) < 0) + st = ERROR; + if (add_to_res(0, &res3, 10) < 0) + st = ERROR; + } + else { + st = ERROR; + } + break; + + case FLOAT_NEG_OK: + if (c >= '0' && c <= '9') { + if (add_to_res(c - '0', &res2, 10) < 0) + st = ERROR; + if (add_to_res(0, &res3, 10) < 0) + st = ERROR; + } + else { + st = ERROR; + } + break; +#endif + + default: + debug_printf("not impl "); + + } + +#ifdef CMDLINE_HAVE_FLOAT + debug_printf("(%"PRIu32") (%"PRIu32") (%"PRIu32")\n", + res1, res2, res3); +#else + debug_printf("(%"PRIu32")\n", res1); +#endif + + buf ++; + c = *buf; + + /* token too long */ + if (buf-srcbuf > 127) + return -1; + } + + switch (st) { + case ZERO_OK: + case DEC_POS_OK: + case HEX_OK: + case OCTAL_OK: + case BIN_OK: + if ( nd.type == INT8 && res1 <= INT8_MAX ) { + if (res) + *(int8_t *)res = (int8_t) res1; + return (buf-srcbuf); + } + else if ( nd.type == INT16 && 
res1 <= INT16_MAX ) { + if (res) + *(int16_t *)res = (int16_t) res1; + return (buf-srcbuf); + } + else if ( nd.type == INT32 && res1 <= INT32_MAX ) { + if (res) + *(int32_t *)res = (int32_t) res1; + return (buf-srcbuf); + } + else if ( nd.type == UINT8 && res1 <= UINT8_MAX ) { + if (res) + *(uint8_t *)res = (uint8_t) res1; + return (buf-srcbuf); + } + else if (nd.type == UINT16 && res1 <= UINT16_MAX ) { + if (res) + *(uint16_t *)res = (uint16_t) res1; + return (buf-srcbuf); + } + else if ( nd.type == UINT32 ) { + if (res) + *(uint32_t *)res = (uint32_t) res1; + return (buf-srcbuf); + } + else if ( nd.type == UINT64 ) { + if (res) + *(uint64_t *)res = res1; + return (buf-srcbuf); + } +#ifdef CMDLINE_HAVE_FLOAT + else if ( nd.type == FLOAT ) { + if (res) + *(float *)res = (float)res1; + return (buf-srcbuf); + } +#endif + else { + return -1; + } + break; + + case DEC_NEG_OK: + if ( nd.type == INT8 && res1 <= INT8_MAX + 1 ) { + if (res) + *(int8_t *)res = (int8_t) (-res1); + return (buf-srcbuf); + } + else if ( nd.type == INT16 && res1 <= (uint16_t)INT16_MAX + 1 ) { + if (res) + *(int16_t *)res = (int16_t) (-res1); + return (buf-srcbuf); + } + else if ( nd.type == INT32 && res1 <= (uint32_t)INT32_MAX + 1 ) { + if (res) + *(int32_t *)res = (int32_t) (-res1); + return (buf-srcbuf); + } +#ifdef CMDLINE_HAVE_FLOAT + else if ( nd.type == FLOAT ) { + if (res) + *(float *)res = - (float)res1; + return (buf-srcbuf); + } +#endif + else { + return -1; + } + break; + +#ifdef CMDLINE_HAVE_FLOAT + case FLOAT_POS: + case FLOAT_POS_OK: + if ( nd.type == FLOAT ) { + if (res) + *(float *)res = (float)res1 + ((float)res2 / (float)res3); + return (buf-srcbuf); + + } + else { + return -1; + } + break; + + case FLOAT_NEG: + case FLOAT_NEG_OK: + if ( nd.type == FLOAT ) { + if (res) + *(float *)res = - ((float)res1 + ((float)res2 / (float)res3)); + return (buf-srcbuf); + + } + else { + return -1; + } + break; +#endif + default: + debug_printf("error\n"); + return -1; + } +} + + +/* parse an int or a float */ +int +cmdline_get_help_num(cmdline_parse_token_hdr_t *tk, char *dstbuf, unsigned int size) +{ + struct cmdline_token_num_data nd; + + memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd)); + + /* should not happen.... don't so this test */ + /* if (nd.type >= (sizeof(num_help)/sizeof(const char *))) */ + /* return -1; */ + + rte_snprintf(dstbuf, size, "%s", num_help[nd.type]); + dstbuf[size-1] = '\0'; + return 0; +} diff --git a/lib/librte_cmdline/cmdline_parse_num.h b/lib/librte_cmdline/cmdline_parse_num.h new file mode 100644 index 0000000000..34aaa935cd --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_num.h @@ -0,0 +1,119 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _PARSE_NUM_H_ +#define _PARSE_NUM_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +enum cmdline_numtype { + UINT8 = 0, + UINT16, + UINT32, + UINT64, + INT8, + INT16, + INT32, + INT64 +#ifndef NO_PARSE_FLOAT + ,FLOAT +#endif +}; + +struct cmdline_token_num_data { + enum cmdline_numtype type; +}; + +struct cmdline_token_num { + struct cmdline_token_hdr hdr; + struct cmdline_token_num_data num_data; +}; +typedef struct cmdline_token_num cmdline_parse_token_num_t; + +extern struct cmdline_token_ops cmdline_token_num_ops; + +int cmdline_parse_num(cmdline_parse_token_hdr_t *tk, + const char *srcbuf, void *res); +int cmdline_get_help_num(cmdline_parse_token_hdr_t *tk, + char *dstbuf, unsigned int size); + +/* + * Warning! Not compatible with C++! 
+ */ +#define TOKEN_NUM_INITIALIZER(structure, field, numtype) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_num_ops, \ + .offset = offsetof(structure, field), \ + }, \ + .num_data = { \ + .type = numtype, \ + }, \ +} + +#ifdef __cplusplus +} +#endif + +#endif /* _PARSE_NUM_H_ */ diff --git a/lib/librte_cmdline/cmdline_parse_portlist.c b/lib/librte_cmdline/cmdline_parse_portlist.c new file mode 100644 index 0000000000..72b3b912f8 --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_portlist.c @@ -0,0 +1,172 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2010, Keith Wiles + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
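To see what the state machine in cmdline_parse_num() accepts, the function can be called directly on a hand-built token. In the sketch below, struct num_record and the sample strings are made up for the example; TOKEN_NUM_INITIALIZER, the UINT32 type and cmdline_parse_num() are the ones defined above, and linking against librte_cmdline is assumed.

#include <stdio.h>
#include <inttypes.h>

#include <cmdline_parse.h>
#include <cmdline_parse_num.h>

/* hypothetical result structure for the example */
struct num_record {
        uint32_t value;
};

int
main(void)
{
        cmdline_parse_token_num_t tok =
                TOKEN_NUM_INITIALIZER(struct num_record, value, UINT32);
        /* the same value written in the bases the state machine understands */
        const char *inputs[] = { "123", "0x7b", "0b1111011", "0173" };
        struct num_record rec;
        unsigned int i;
        int len;

        for (i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
                len = cmdline_parse_num(&tok.hdr, inputs[i], &rec.value);
                if (len < 0)
                        printf("%-10s -> parse error\n", inputs[i]);
                else
                        printf("%-10s -> %" PRIu32 " (%d chars)\n",
                               inputs[i], rec.value, len);
        }
        return 0;
}

All four strings encode 123 (decimal, hex with 0x, binary with 0b, octal with a leading 0), and the return value is the number of characters consumed. Negative values are only accepted for the INT* result types, and floats only when float support is compiled in.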
IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef __linux__ +#include +#endif + +#include +#include "cmdline_parse.h" +#include "cmdline_parse_portlist.h" + +struct cmdline_token_ops cmdline_token_portlist_ops = { + .parse = cmdline_parse_portlist, + .complete_get_nb = NULL, + .complete_get_elt = NULL, + .get_help = cmdline_get_help_portlist, +}; + +static void +parse_set_list(cmdline_portlist_t * pl, int low, int high) +{ + do { + pl->map |= (1 << low++); + } while (low <= high); +} + +static int +parse_ports(cmdline_portlist_t * pl, const char * str) +{ + size_t ps, pe; + const char *first, *last; + char *end; + + for (first = str, last = first; + first != NULL && last != NULL; + first = last + 1) { + + last = strchr(first, ','); + + errno = 0; + ps = strtoul(first, &end, 10); + if (errno != 0 || end == first || + (end[0] != '-' && end[0] != 0 && end != last)) + return (-1); + + /* Support for N-M portlist format */ + if (end[0] == '-') { + errno = 0; + first = end + 1; + pe = strtoul(first, &end, 10); + if (errno != 0 || end == first || + (end[0] != 0 && end != last)) + return (-1); + } else { + pe = ps; + } + + if (ps > pe || pe >= sizeof (pl->map) * 8) + return (-1); + + parse_set_list(pl, ps, pe); + } + + return (0); +} + +int +cmdline_parse_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk, + const char *buf, void *res) +{ + unsigned int token_len = 0; + char portlist_str[PORTLIST_TOKEN_SIZE+1]; + cmdline_portlist_t *pl = res; + + if (! *buf) + return (-1); + + while (!cmdline_isendoftoken(buf[token_len]) && + (token_len < PORTLIST_TOKEN_SIZE)) + token_len++; + + if (token_len >= PORTLIST_TOKEN_SIZE) + return (-1); + + if (pl == NULL) + return (token_len); + + rte_snprintf(portlist_str, token_len+1, "%s", buf); + + pl->map = 0; + if (strcmp("all", portlist_str) == 0) + pl->map = UINT32_MAX; + else if (parse_ports(pl, portlist_str) != 0) + return (-1); + + return token_len; +} + +int cmdline_get_help_portlist(cmdline_parse_token_hdr_t *tk, char *dstbuf, + unsigned int size) +{ + (void)tk; + rte_snprintf(dstbuf, size, "range of ports as 3,4-6,8-19,20"); + return 0; +} diff --git a/lib/librte_cmdline/cmdline_parse_portlist.h b/lib/librte_cmdline/cmdline_parse_portlist.h new file mode 100644 index 0000000000..6f481ca45b --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_portlist.h @@ -0,0 +1,113 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2010, Keith Wiles + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _PARSE_PORTLIST_H_ +#define _PARSE_PORTLIST_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* size of a parsed string */ +#define PORTLIST_TOKEN_SIZE 128 +#define PORTLIST_MAX_TOKENS 32 + +typedef struct cmdline_portlist { + uint32_t map; +} cmdline_portlist_t; + +struct cmdline_token_portlist_data { + uint8_t flags; +}; + +struct cmdline_token_portlist { + struct cmdline_token_hdr hdr; + struct cmdline_token_portlist_data range_data; +}; +typedef struct cmdline_token_portlist cmdline_parse_token_portlist_t; + +extern struct cmdline_token_ops cmdline_token_portlist_ops; + +int cmdline_parse_portlist(cmdline_parse_token_hdr_t *tk, + const char *srcbuf, void *res); +int cmdline_get_help_portlist(cmdline_parse_token_hdr_t *tk, + char *dstbuf, unsigned int size); + +/* + * Warning! Not compatible with C++! + */ +#define TOKEN_PORTLIST_INITIALIZER(structure, field) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_portlist_ops, \ + .offset = offsetof(structure, field), \ + }, \ + .range_data = { \ + .flags = 0, \ + }, \ +} + +#ifdef __cplusplus +} +#endif + +#endif /* _PARSE_PORTLIST_H_ */ diff --git a/lib/librte_cmdline/cmdline_parse_string.c b/lib/librte_cmdline/cmdline_parse_string.c new file mode 100644 index 0000000000..55cc4d5f04 --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_string.c @@ -0,0 +1,228 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
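A short sketch of the port list parser in isolation; the sample string and the printing are invented, the rest is the API defined above. The token header argument is unused by cmdline_parse_portlist(), so NULL is passed here; linking against librte_cmdline is assumed.

#include <stdio.h>
#include <inttypes.h>

#include <cmdline_parse.h>
#include <cmdline_parse_portlist.h>

int
main(void)
{
        cmdline_portlist_t pl;
        unsigned int port;
        int len;

        /* ports 3, 4 to 6 and 8; the keyword "all" would set every bit */
        len = cmdline_parse_portlist(NULL, "3,4-6,8", &pl);
        if (len < 0) {
                printf("parse error\n");
                return 1;
        }
        /* each selected port is one bit in the 32-bit map */
        printf("consumed %d chars, map = 0x%08" PRIx32 "\n", len, pl.map);
        for (port = 0; port < 32; port++)
                if (pl.map & (1u << port))
                        printf("port %u selected\n", port);
        return 0;
}

Because the map is a single uint32_t, only ports 0 to 31 can be expressed; a list that reaches past 31 makes the parser return -1.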
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "cmdline_parse.h" +#include "cmdline_parse_string.h" + +struct cmdline_token_ops cmdline_token_string_ops = { + .parse = cmdline_parse_string, + .complete_get_nb = cmdline_complete_get_nb_string, + .complete_get_elt = cmdline_complete_get_elt_string, + .get_help = cmdline_get_help_string, +}; + +#define MULTISTRING_HELP "Mul-choice STRING" +#define ANYSTRING_HELP "Any STRING" +#define FIXEDSTRING_HELP "Fixed STRING" + +static unsigned int +get_token_len(const char *s) +{ + char c; + unsigned int i=0; + + c = s[i]; + while (c!='#' && c!='\0') { + i++; + c = s[i]; + } + return i; +} + +static const char * +get_next_token(const char *s) +{ + unsigned int i; + i = get_token_len(s); + if (s[i] == '#') + return s+i+1; + return NULL; +} + +int +cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *buf, void *res) +{ + struct cmdline_token_string *tk2 = (struct cmdline_token_string *)tk; + struct cmdline_token_string_data *sd = &tk2->string_data; + unsigned int token_len; + const char *str; + + if (! *buf) + return -1; + + /* fixed string */ + if (sd->str) { + str = sd->str; + do { + token_len = get_token_len(str); + + /* if token is too big... 
*/ + if (token_len >= STR_TOKEN_SIZE - 1) { + continue; + } + + if ( strncmp(buf, str, token_len) ) { + continue; + } + + if ( !cmdline_isendoftoken(*(buf+token_len)) ) { + continue; + } + + break; + } while ( (str = get_next_token(str)) != NULL ); + + if (!str) + return -1; + } + /* unspecified string */ + else { + token_len=0; + while(!cmdline_isendoftoken(buf[token_len]) && + token_len < (STR_TOKEN_SIZE-1)) + token_len++; + + /* return if token too long */ + if (token_len >= STR_TOKEN_SIZE - 1) { + return -1; + } + } + + if (res) { + /* we are sure that token_len is < STR_TOKEN_SIZE-1 */ + rte_snprintf(res, STR_TOKEN_SIZE, "%s", buf); + *((char *)res + token_len) = 0; + } + + return token_len; +} + +int cmdline_complete_get_nb_string(cmdline_parse_token_hdr_t *tk) +{ + struct cmdline_token_string *tk2 = (struct cmdline_token_string *)tk; + struct cmdline_token_string_data *sd = &tk2->string_data;; + int ret=1; + const char *str; + + if (!sd->str) + return 0; + + str = sd->str; + while( (str = get_next_token(str)) != NULL ) { + ret++; + } + return ret; +} + +int cmdline_complete_get_elt_string(cmdline_parse_token_hdr_t *tk, int idx, + char *dstbuf, unsigned int size) +{ + struct cmdline_token_string *tk2 = (struct cmdline_token_string *)tk; + struct cmdline_token_string_data *sd = &tk2->string_data;; + const char *s; + unsigned int len; + + s = sd->str; + + while (idx-- && s) + s = get_next_token(s); + + if (!s) + return -1; + + len = get_token_len(s); + if (len > size - 1) + return -1; + + memcpy(dstbuf, s, len); + dstbuf[len] = '\0'; + return 0; +} + + +int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf, + unsigned int size) +{ + struct cmdline_token_string *tk2 = (struct cmdline_token_string *)tk; + struct cmdline_token_string_data *sd = &tk2->string_data;; + const char *s; + + s = sd->str; + + if (s) { + if (get_next_token(s)) + rte_snprintf(dstbuf, size, MULTISTRING_HELP); + else + rte_snprintf(dstbuf, size, FIXEDSTRING_HELP); + } else + rte_snprintf(dstbuf, size, ANYSTRING_HELP); + + return 0; +} diff --git a/lib/librte_cmdline/cmdline_parse_string.h b/lib/librte_cmdline/cmdline_parse_string.h new file mode 100644 index 0000000000..35239b9f34 --- /dev/null +++ b/lib/librte_cmdline/cmdline_parse_string.h @@ -0,0 +1,113 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _PARSE_STRING_H_ +#define _PARSE_STRING_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* size of a parsed string */ +#define STR_TOKEN_SIZE 128 + +typedef char cmdline_fixed_string_t[STR_TOKEN_SIZE]; + +struct cmdline_token_string_data { + const char *str; +}; + +struct cmdline_token_string { + struct cmdline_token_hdr hdr; + struct cmdline_token_string_data string_data; +}; +typedef struct cmdline_token_string cmdline_parse_token_string_t; + +extern struct cmdline_token_ops cmdline_token_string_ops; + +int cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *srcbuf, + void *res); +int cmdline_complete_get_nb_string(cmdline_parse_token_hdr_t *tk); +int cmdline_complete_get_elt_string(cmdline_parse_token_hdr_t *tk, int idx, + char *dstbuf, unsigned int size); +int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf, + unsigned int size); + +/* + * Warning! Not compatible with C++! 
+ */ +#define TOKEN_STRING_INITIALIZER(structure, field, string) \ +{ \ + .hdr = { \ + .ops = &cmdline_token_string_ops, \ + .offset = offsetof(structure, field), \ + }, \ + .string_data = { \ + .str = string, \ + }, \ +} + +#ifdef __cplusplus +} +#endif + +#endif /* _PARSE_STRING_H_ */ diff --git a/lib/librte_cmdline/cmdline_rdline.c b/lib/librte_cmdline/cmdline_rdline.c new file mode 100644 index 0000000000..edc479cc85 --- /dev/null +++ b/lib/librte_cmdline/cmdline_rdline.c @@ -0,0 +1,675 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
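A string token matches either any word (when the string data is NULL) or one word out of a '#'-separated list, and that same list drives the completion callbacks. The sketch below is illustrative only: struct cmd_result and the word list "show#clear" are invented, the functions and TOKEN_STRING_INITIALIZER are the ones defined above, and linking against librte_cmdline is assumed.

#include <stdio.h>

#include <cmdline_parse.h>
#include <cmdline_parse_string.h>

/* hypothetical result structure for the example */
struct cmd_result {
        cmdline_fixed_string_t action;
};

int
main(void)
{
        /* a fixed token: only the words "show" or "clear" match */
        cmdline_parse_token_string_t tok =
                TOKEN_STRING_INITIALIZER(struct cmd_result, action, "show#clear");
        struct cmd_result res;
        char word[STR_TOKEN_SIZE];
        int i, n, len;

        len = cmdline_parse_string(&tok.hdr, "clear", res.action);
        printf("parse \"clear\" -> %d (res=\"%s\")\n", len, res.action);

        len = cmdline_parse_string(&tok.hdr, "flush", res.action);
        printf("parse \"flush\" -> %d\n", len); /* -1: not one of the choices */

        /* the completion callbacks simply enumerate the '#'-separated words */
        n = cmdline_complete_get_nb_string(&tok.hdr);
        for (i = 0; i < n; i++) {
                if (cmdline_complete_get_elt_string(&tok.hdr, i, word, sizeof(word)) == 0)
                        printf("choice %d: %s\n", i, word);
        }
        return 0;
}

With a NULL string instead of "show#clear", any word shorter than STR_TOKEN_SIZE-1 characters would be accepted and copied into res.action.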
IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include + +#include "cmdline_cirbuf.h" +#include "cmdline_rdline.h" + +static void rdline_puts(struct rdline *rdl, const char *buf); +static void rdline_miniprintf(struct rdline *rdl, + const char *buf, unsigned int val); + +#ifndef NO_RDLINE_HISTORY +static void rdline_remove_old_history_item(struct rdline *rdl); +static void rdline_remove_first_history_item(struct rdline *rdl); +static unsigned int rdline_get_history_size(struct rdline *rdl); +#endif /* !NO_RDLINE_HISTORY */ + + +/* isblank() needs _XOPEN_SOURCE >= 600 || _ISOC99_SOURCE, so use our + * own. */ +static int +isblank2(char c) +{ + if (c == ' ' || + c == '\t' ) + return 1; + return 0; +} + +void +rdline_init(struct rdline *rdl, + rdline_write_char_t *write_char, + rdline_validate_t *validate, + rdline_complete_t *complete) +{ + memset(rdl, 0, sizeof(*rdl)); + rdl->validate = validate; + rdl->complete = complete; + rdl->write_char = write_char; + rdl->status = RDLINE_INIT; +#ifndef NO_RDLINE_HISTORY + cirbuf_init(&rdl->history, rdl->history_buf, 0, RDLINE_HISTORY_BUF_SIZE); +#endif /* !NO_RDLINE_HISTORY */ +} + +void +rdline_newline(struct rdline *rdl, const char *prompt) +{ + unsigned int i; + + vt100_init(&rdl->vt100); + cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE); + cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE); + + if (prompt != rdl->prompt) + memcpy(rdl->prompt, prompt, sizeof(rdl->prompt)-1); + rdl->prompt_size = strnlen(prompt, RDLINE_PROMPT_SIZE); + + for (i=0 ; iprompt_size ; i++) + rdl->write_char(rdl, rdl->prompt[i]); + rdl->status = RDLINE_RUNNING; + +#ifndef NO_RDLINE_HISTORY + rdl->history_cur_line = -1; +#endif /* !NO_RDLINE_HISTORY */ +} + +void +rdline_stop(struct rdline *rdl) +{ + rdl->status = RDLINE_INIT; +} + +void +rdline_quit(struct rdline *rdl) +{ + rdl->status = RDLINE_EXITED; +} + +void +rdline_restart(struct rdline *rdl) +{ + rdl->status = RDLINE_RUNNING; +} + +void +rdline_reset(struct rdline *rdl) +{ + vt100_init(&rdl->vt100); + cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE); + cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE); + + rdl->status = RDLINE_RUNNING; + +#ifndef NO_RDLINE_HISTORY + rdl->history_cur_line = -1; +#endif /* !NO_RDLINE_HISTORY */ +} + +const char * +rdline_get_buffer(struct rdline *rdl) +{ + unsigned int len_l, len_r; + cirbuf_align_left(&rdl->left); + cirbuf_align_left(&rdl->right); + + len_l = CIRBUF_GET_LEN(&rdl->left); + len_r = CIRBUF_GET_LEN(&rdl->right); + memcpy(rdl->left_buf+len_l, rdl->right_buf, len_r); + + rdl->left_buf[len_l + len_r] = '\n'; + rdl->left_buf[len_l + len_r + 1] = '\0'; + return rdl->left_buf; +} + +static void +display_right_buffer(struct rdline *rdl, int force) +{ + unsigned int i; + char tmp; + + if (!force && CIRBUF_IS_EMPTY(&rdl->right)) + return; + + rdline_puts(rdl, vt100_clear_right); + CIRBUF_FOREACH(&rdl->right, i, tmp) { + rdl->write_char(rdl, tmp); + } + if (!CIRBUF_IS_EMPTY(&rdl->right)) + 
rdline_miniprintf(rdl, vt100_multi_left, + CIRBUF_GET_LEN(&rdl->right)); +} + +void +rdline_redisplay(struct rdline *rdl) +{ + unsigned int i; + char tmp; + + rdline_puts(rdl, vt100_home); + for (i=0 ; iprompt_size ; i++) + rdl->write_char(rdl, rdl->prompt[i]); + CIRBUF_FOREACH(&rdl->left, i, tmp) { + rdl->write_char(rdl, tmp); + } + display_right_buffer(rdl, 1); +} + +int +rdline_char_in(struct rdline *rdl, char c) +{ + unsigned int i; + int cmd; + char tmp; +#ifndef NO_RDLINE_HISTORY + char *buf; +#endif + + if (rdl->status == RDLINE_EXITED) + return RDLINE_RES_EXITED; + if (rdl->status != RDLINE_RUNNING) + return RDLINE_RES_NOT_RUNNING; + + cmd = vt100_parser(&rdl->vt100, c); + if (cmd == -2) + return RDLINE_RES_SUCCESS; + + if (cmd >= 0) { + switch (cmd) { + case CMDLINE_KEY_CTRL_B: + case CMDLINE_KEY_LEFT_ARR: + if (CIRBUF_IS_EMPTY(&rdl->left)) + break; + tmp = cirbuf_get_tail(&rdl->left); + cirbuf_del_tail(&rdl->left); + cirbuf_add_head(&rdl->right, tmp); + rdline_puts(rdl, vt100_left_arr); + break; + + case CMDLINE_KEY_CTRL_F: + case CMDLINE_KEY_RIGHT_ARR: + if (CIRBUF_IS_EMPTY(&rdl->right)) + break; + tmp = cirbuf_get_head(&rdl->right); + cirbuf_del_head(&rdl->right); + cirbuf_add_tail(&rdl->left, tmp); + rdline_puts(rdl, vt100_right_arr); + break; + + case CMDLINE_KEY_WLEFT: + while (! CIRBUF_IS_EMPTY(&rdl->left) && + (tmp = cirbuf_get_tail(&rdl->left)) && + isblank2(tmp)) { + rdline_puts(rdl, vt100_left_arr); + cirbuf_del_tail(&rdl->left); + cirbuf_add_head(&rdl->right, tmp); + } + while (! CIRBUF_IS_EMPTY(&rdl->left) && + (tmp = cirbuf_get_tail(&rdl->left)) && + !isblank2(tmp)) { + rdline_puts(rdl, vt100_left_arr); + cirbuf_del_tail(&rdl->left); + cirbuf_add_head(&rdl->right, tmp); + } + break; + + case CMDLINE_KEY_WRIGHT: + while (! CIRBUF_IS_EMPTY(&rdl->right) && + (tmp = cirbuf_get_head(&rdl->right)) && + isblank2(tmp)) { + rdline_puts(rdl, vt100_right_arr); + cirbuf_del_head(&rdl->right); + cirbuf_add_tail(&rdl->left, tmp); + } + while (! CIRBUF_IS_EMPTY(&rdl->right) && + (tmp = cirbuf_get_head(&rdl->right)) && + !isblank2(tmp)) { + rdline_puts(rdl, vt100_right_arr); + cirbuf_del_head(&rdl->right); + cirbuf_add_tail(&rdl->left, tmp); + } + break; + + case CMDLINE_KEY_BKSPACE: + if(!cirbuf_del_tail_safe(&rdl->left)) { + rdline_puts(rdl, vt100_bs); + display_right_buffer(rdl, 1); + } + break; + + case CMDLINE_KEY_META_BKSPACE: + case CMDLINE_KEY_CTRL_W: + while (! CIRBUF_IS_EMPTY(&rdl->left) && isblank2(cirbuf_get_tail(&rdl->left))) { + rdline_puts(rdl, vt100_bs); + cirbuf_del_tail(&rdl->left); + } + while (! CIRBUF_IS_EMPTY(&rdl->left) && !isblank2(cirbuf_get_tail(&rdl->left))) { + rdline_puts(rdl, vt100_bs); + cirbuf_del_tail(&rdl->left); + } + display_right_buffer(rdl, 1); + break; + + case CMDLINE_KEY_META_D: + while (! CIRBUF_IS_EMPTY(&rdl->right) && isblank2(cirbuf_get_head(&rdl->right))) + cirbuf_del_head(&rdl->right); + while (! CIRBUF_IS_EMPTY(&rdl->right) && !isblank2(cirbuf_get_head(&rdl->right))) + cirbuf_del_head(&rdl->right); + display_right_buffer(rdl, 1); + break; + + case CMDLINE_KEY_SUPPR: + case CMDLINE_KEY_CTRL_D: + if (cmd == CMDLINE_KEY_CTRL_D && + CIRBUF_IS_EMPTY(&rdl->left) && + CIRBUF_IS_EMPTY(&rdl->right)) { + return RDLINE_RES_EOF; + } + if (!cirbuf_del_head_safe(&rdl->right)) { + display_right_buffer(rdl, 1); + } + break; + + case CMDLINE_KEY_CTRL_A: + if (CIRBUF_IS_EMPTY(&rdl->left)) + break; + rdline_miniprintf(rdl, vt100_multi_left, + CIRBUF_GET_LEN(&rdl->left)); + while (! 
CIRBUF_IS_EMPTY(&rdl->left)) { + tmp = cirbuf_get_tail(&rdl->left); + cirbuf_del_tail(&rdl->left); + cirbuf_add_head(&rdl->right, tmp); + } + break; + + case CMDLINE_KEY_CTRL_E: + if (CIRBUF_IS_EMPTY(&rdl->right)) + break; + rdline_miniprintf(rdl, vt100_multi_right, + CIRBUF_GET_LEN(&rdl->right)); + while (! CIRBUF_IS_EMPTY(&rdl->right)) { + tmp = cirbuf_get_head(&rdl->right); + cirbuf_del_head(&rdl->right); + cirbuf_add_tail(&rdl->left, tmp); + } + break; + +#ifndef NO_RDLINE_KILL_BUF + case CMDLINE_KEY_CTRL_K: + cirbuf_get_buf_head(&rdl->right, rdl->kill_buf, RDLINE_BUF_SIZE); + rdl->kill_size = CIRBUF_GET_LEN(&rdl->right); + cirbuf_del_buf_head(&rdl->right, rdl->kill_size); + rdline_puts(rdl, vt100_clear_right); + break; + + case CMDLINE_KEY_CTRL_Y: + i=0; + while(CIRBUF_GET_LEN(&rdl->right) + CIRBUF_GET_LEN(&rdl->left) < + RDLINE_BUF_SIZE && + i < rdl->kill_size) { + cirbuf_add_tail(&rdl->left, rdl->kill_buf[i]); + rdl->write_char(rdl, rdl->kill_buf[i]); + i++; + } + display_right_buffer(rdl, 0); + break; +#endif /* !NO_RDLINE_KILL_BUF */ + + case CMDLINE_KEY_CTRL_C: + rdline_puts(rdl, "\r\n"); + rdline_newline(rdl, rdl->prompt); + break; + + case CMDLINE_KEY_CTRL_L: + rdline_redisplay(rdl); + break; + + case CMDLINE_KEY_TAB: + case CMDLINE_KEY_HELP: + cirbuf_align_left(&rdl->left); + rdl->left_buf[CIRBUF_GET_LEN(&rdl->left)] = '\0'; + if (rdl->complete) { + char tmp_buf[BUFSIZ]; + int complete_state; + int ret; + unsigned int tmp_size; + + if (cmd == CMDLINE_KEY_TAB) + complete_state = 0; + else + complete_state = -1; + + /* see in parse.h for help on complete() */ + ret = rdl->complete(rdl, rdl->left_buf, + tmp_buf, sizeof(tmp_buf), + &complete_state); + /* no completion or error */ + if (ret <= 0) { + return RDLINE_RES_COMPLETE; + } + + tmp_size = strnlen(tmp_buf, sizeof(tmp_buf)); + /* add chars */ + if (ret == RDLINE_RES_COMPLETE) { + i=0; + while(CIRBUF_GET_LEN(&rdl->right) + CIRBUF_GET_LEN(&rdl->left) < + RDLINE_BUF_SIZE && + i < tmp_size) { + cirbuf_add_tail(&rdl->left, tmp_buf[i]); + rdl->write_char(rdl, tmp_buf[i]); + i++; + } + display_right_buffer(rdl, 1); + return RDLINE_RES_COMPLETE; /* ?? 
*/ + } + + /* choice */ + rdline_puts(rdl, "\r\n"); + while (ret) { + rdl->write_char(rdl, ' '); + for (i=0 ; tmp_buf[i] ; i++) + rdl->write_char(rdl, tmp_buf[i]); + rdline_puts(rdl, "\r\n"); + ret = rdl->complete(rdl, rdl->left_buf, + tmp_buf, sizeof(tmp_buf), + &complete_state); + } + + rdline_redisplay(rdl); + } + return RDLINE_RES_COMPLETE; + + case CMDLINE_KEY_RETURN: + case CMDLINE_KEY_RETURN2: + rdline_get_buffer(rdl); + rdl->status = RDLINE_INIT; + rdline_puts(rdl, "\r\n"); +#ifndef NO_RDLINE_HISTORY + if (rdl->history_cur_line != -1) + rdline_remove_first_history_item(rdl); +#endif + + if (rdl->validate) + rdl->validate(rdl, rdl->left_buf, CIRBUF_GET_LEN(&rdl->left)+2); + /* user may have stopped rdline */ + if (rdl->status == RDLINE_EXITED) + return RDLINE_RES_EXITED; + return RDLINE_RES_VALIDATED; + +#ifndef NO_RDLINE_HISTORY + case CMDLINE_KEY_UP_ARR: + case CMDLINE_KEY_CTRL_P: + if (rdl->history_cur_line == 0) { + rdline_remove_first_history_item(rdl); + } + if (rdl->history_cur_line <= 0) { + rdline_add_history(rdl, rdline_get_buffer(rdl)); + rdl->history_cur_line = 0; + } + + buf = rdline_get_history_item(rdl, rdl->history_cur_line + 1); + if (!buf) + break; + + rdl->history_cur_line ++; + vt100_init(&rdl->vt100); + cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE); + cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE); + cirbuf_add_buf_tail(&rdl->left, buf, strnlen(buf, RDLINE_BUF_SIZE)); + rdline_redisplay(rdl); + break; + + case CMDLINE_KEY_DOWN_ARR: + case CMDLINE_KEY_CTRL_N: + if (rdl->history_cur_line - 1 < 0) + break; + + rdl->history_cur_line --; + buf = rdline_get_history_item(rdl, rdl->history_cur_line); + if (!buf) + break; + vt100_init(&rdl->vt100); + cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE); + cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE); + cirbuf_add_buf_tail(&rdl->left, buf, strnlen(buf, RDLINE_BUF_SIZE)); + rdline_redisplay(rdl); + + break; +#endif /* !NO_RDLINE_HISTORY */ + + + default: + break; + } + + return RDLINE_RES_SUCCESS; + } + + if (!isprint((int)c)) + return RDLINE_RES_SUCCESS; + + /* standard chars */ + if (CIRBUF_GET_LEN(&rdl->left) + CIRBUF_GET_LEN(&rdl->right) >= RDLINE_BUF_SIZE) + return RDLINE_RES_SUCCESS; + + if (cirbuf_add_tail_safe(&rdl->left, c)) + return RDLINE_RES_SUCCESS; + + rdl->write_char(rdl, c); + display_right_buffer(rdl, 0); + + return RDLINE_RES_SUCCESS; +} + + +/* HISTORY */ + +#ifndef NO_RDLINE_HISTORY +static void +rdline_remove_old_history_item(struct rdline * rdl) +{ + char tmp; + + while (! CIRBUF_IS_EMPTY(&rdl->history) ) { + tmp = cirbuf_get_head(&rdl->history); + cirbuf_del_head(&rdl->history); + if (!tmp) + break; + } +} + +static void +rdline_remove_first_history_item(struct rdline * rdl) +{ + char tmp; + + if ( CIRBUF_IS_EMPTY(&rdl->history) ) { + return; + } + else { + cirbuf_del_tail(&rdl->history); + } + + while (! 
CIRBUF_IS_EMPTY(&rdl->history) ) { + tmp = cirbuf_get_tail(&rdl->history); + if (!tmp) + break; + cirbuf_del_tail(&rdl->history); + } +} + +static unsigned int +rdline_get_history_size(struct rdline * rdl) +{ + unsigned int i, tmp, ret=0; + + CIRBUF_FOREACH(&rdl->history, i, tmp) { + if (tmp == 0) + ret ++; + } + + return ret; +} + +char * +rdline_get_history_item(struct rdline * rdl, unsigned int idx) +{ + unsigned int len, i, tmp; + + len = rdline_get_history_size(rdl); + if ( idx >= len ) { + return NULL; + } + + cirbuf_align_left(&rdl->history); + + CIRBUF_FOREACH(&rdl->history, i, tmp) { + if ( idx == len - 1) { + return rdl->history_buf + i; + } + if (tmp == 0) + len --; + } + + return NULL; +} + +int +rdline_add_history(struct rdline * rdl, const char * buf) +{ + unsigned int len, i; + + len = strnlen(buf, RDLINE_BUF_SIZE); + for (i=0; i= RDLINE_HISTORY_BUF_SIZE ) + return -1; + + while ( len >= CIRBUF_GET_FREELEN(&rdl->history) ) { + rdline_remove_old_history_item(rdl); + } + + cirbuf_add_buf_tail(&rdl->history, buf, len); + cirbuf_add_tail(&rdl->history, 0); + + return 0; +} + +void +rdline_clear_history(struct rdline * rdl) +{ + cirbuf_init(&rdl->history, rdl->history_buf, 0, RDLINE_HISTORY_BUF_SIZE); +} + +#else /* !NO_RDLINE_HISTORY */ + +int rdline_add_history(struct rdline * rdl, const char * buf) {return -1;} +void rdline_clear_history(struct rdline * rdl) {} +char * rdline_get_history_item(struct rdline * rdl, unsigned int i) {return NULL;} + + +#endif /* !NO_RDLINE_HISTORY */ + + +/* STATIC USEFUL FUNCS */ + +static void +rdline_puts(struct rdline * rdl, const char * buf) +{ + char c; + while ( (c = *(buf++)) != '\0' ) { + rdl->write_char(rdl, c); + } +} + +/* a very very basic printf with one arg and one format 'u' */ +static void +rdline_miniprintf(struct rdline *rdl, const char * buf, unsigned int val) +{ + char c, started=0, div=100; + + while ( (c=*(buf++)) ) { + if (c != '%') { + rdl->write_char(rdl, c); + continue; + } + c = *(buf++); + if (c != 'u') { + rdl->write_char(rdl, '%'); + rdl->write_char(rdl, c); + continue; + } + /* val is never more than 255 */ + while (div) { + c = (char)(val / div); + if (c || started) { + rdl->write_char(rdl, (char)(c+'0')); + started = 1; + } + val %= div; + div /= 10; + } + } +} + diff --git a/lib/librte_cmdline/cmdline_rdline.h b/lib/librte_cmdline/cmdline_rdline.h new file mode 100644 index 0000000000..1ff984527c --- /dev/null +++ b/lib/librte_cmdline/cmdline_rdline.h @@ -0,0 +1,260 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
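As a hedged illustration of the history helpers implemented above (rdline_add_history(), rdline_get_history_item(), rdline_clear_history()), a caller that already holds an initialised struct rdline might walk the stored lines as below; show_history is a hypothetical name and is not part of this patch.

#include <stdio.h>
#include "cmdline_rdline.h"

/* assumes rdl was already set up with rdline_init() */
static void show_history(struct rdline *rdl)
{
	unsigned int i;
	char *item;

	rdline_add_history(rdl, "first command");
	rdline_add_history(rdl, "second command");

	/* index 0 is the most recent entry; NULL marks the end of history */
	for (i = 0; (item = rdline_get_history_item(rdl, i)) != NULL; i++)
		printf("history[%u] = %s\n", i, item);

	rdline_clear_history(rdl);
}
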
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RDLINE_H_ +#define _RDLINE_H_ + +/** + * This file is a small equivalent to the GNU readline library, but it + * was originally designed for small systems, like Atmel AVR + * microcontrollers (8 bits). Indeed, we don't use any malloc that is + * sometimes not implemented (or just not recommended) on such + * systems. + * + * Obviously, it does not support as many things as the GNU readline, + * but at least it supports some interesting features like a kill + * buffer and a command history. + * + * It also have a feature that does not have the GNU readline (as far + * as I know): we can have several instances of it running at the same + * time, even on a monothread program, since it works with callbacks. + * + * The lib is designed for a client-side or a server-side use: + * - server-side: the server receives all data from a socket, including + * control chars, like arrows, tabulations, ... The client is + * very simple, it can be a telnet or a minicom through a serial line. + * - client-side: the client receives its data through its stdin for + * instance. 
+ */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* configuration */ +#define RDLINE_BUF_SIZE 256 +#define RDLINE_PROMPT_SIZE 32 +#define RDLINE_VT100_BUF_SIZE 8 +#define RDLINE_HISTORY_BUF_SIZE BUFSIZ +#define RDLINE_HISTORY_MAX_LINE 64 + +enum rdline_status { + RDLINE_INIT, + RDLINE_RUNNING, + RDLINE_EXITED +}; + +struct rdline; + +typedef int (rdline_write_char_t)(struct rdline *rdl, char); +typedef void (rdline_validate_t)(struct rdline *rdl, + const char *buf, unsigned int size); +typedef int (rdline_complete_t)(struct rdline *rdl, const char *buf, + char *dstbuf, unsigned int dstsize, + int *state); + +struct rdline { + enum rdline_status status; + /* rdline bufs */ + struct cirbuf left; + struct cirbuf right; + char left_buf[RDLINE_BUF_SIZE+2]; /* reserve 2 chars for the \n\0 */ + char right_buf[RDLINE_BUF_SIZE]; + + char prompt[RDLINE_PROMPT_SIZE]; + unsigned int prompt_size; + +#ifndef NO_RDLINE_KILL_BUF + char kill_buf[RDLINE_BUF_SIZE]; + unsigned int kill_size; +#endif + +#ifndef NO_RDLINE_HISTORY + /* history */ + struct cirbuf history; + char history_buf[RDLINE_HISTORY_BUF_SIZE]; + int history_cur_line; +#endif + + /* callbacks and func pointers */ + rdline_write_char_t *write_char; + rdline_validate_t *validate; + rdline_complete_t *complete; + + /* vt100 parser */ + struct cmdline_vt100 vt100; + + /* opaque pointer */ + void *opaque; +}; + +/** + * Init fields for a struct rdline. Call this only once at the beginning + * of your program. + * \param rdl A pointer to an uninitialized struct rdline + * \param write_char The function used by the function to write a character + * \param validate A pointer to the function to execute when the + * user validates the buffer. + * \param complete A pointer to the function to execute when the + * user completes the buffer. + */ +void rdline_init(struct rdline *rdl, + rdline_write_char_t *write_char, + rdline_validate_t *validate, + rdline_complete_t *complete); + + +/** + * Init the current buffer, and display a prompt. + * \param rdl A pointer to a struct rdline + * \param prompt A string containing the prompt + */ +void rdline_newline(struct rdline *rdl, const char *prompt); + +/** + * Call it and all received chars will be ignored. + * \param rdl A pointer to a struct rdline + */ +void rdline_stop(struct rdline *rdl); + +/** + * Same than rdline_stop() except that next calls to rdline_char_in() + * will return RDLINE_RES_EXITED. + * \param rdl A pointer to a struct rdline + */ +void rdline_quit(struct rdline *rdl); + +/** + * Restart after a call to rdline_stop() or rdline_quit() + * \param rdl A pointer to a struct rdline + */ +void rdline_restart(struct rdline *rdl); + +/** + * Redisplay the current buffer + * \param rdl A pointer to a struct rdline + */ +void rdline_redisplay(struct rdline *rdl); + +/** + * Reset the current buffer and setup for a new line. + * \param rdl A pointer to a struct rdline + */ +void rdline_reset(struct rdline *rdl); + + +/* return status for rdline_char_in() */ +#define RDLINE_RES_SUCCESS 0 +#define RDLINE_RES_VALIDATED 1 +#define RDLINE_RES_COMPLETE 2 +#define RDLINE_RES_NOT_RUNNING -1 +#define RDLINE_RES_EOF -2 +#define RDLINE_RES_EXITED -3 + +/** + * append a char to the readline buffer. + * Return RDLINE_RES_VALIDATE when the line has been validated. + * Return RDLINE_RES_COMPLETE when the user asked to complete the buffer. + * Return RDLINE_RES_NOT_RUNNING if it is not running. + * Return RDLINE_RES_EOF if EOF (ctrl-d on an empty line). + * Else return RDLINE_RES_SUCCESS. 
+ * XXX error case when the buffer is full ? + * + * \param rdl A pointer to a struct rdline + * \param c The character to append + */ +int rdline_char_in(struct rdline *rdl, char c); + +/** + * Return the current buffer, terminated by '\0'. + * \param rdl A pointer to a struct rdline + */ +const char *rdline_get_buffer(struct rdline *rdl); + + +/** + * Add the buffer to history. + * return < 0 on error. + * \param rdl A pointer to a struct rdline + * \param buf A buffer that is terminated by '\0' + */ +int rdline_add_history(struct rdline *rdl, const char *buf); + +/** + * Clear current history + * \param rdl A pointer to a struct rdline + */ +void rdline_clear_history(struct rdline *rdl); + +/** + * Get the i-th history item + */ +char *rdline_get_history_item(struct rdline *rdl, unsigned int i); + +#ifdef __cplusplus +} +#endif + +#endif /* _RDLINE_H_ */ diff --git a/lib/librte_cmdline/cmdline_socket.c b/lib/librte_cmdline/cmdline_socket.c new file mode 100644 index 0000000000..21d32d93c1 --- /dev/null +++ b/lib/librte_cmdline/cmdline_socket.c @@ -0,0 +1,120 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. 
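As a minimal sketch of driving the rdline API declared in the header above (illustrative only, not part of the library sources): the caller supplies the three callbacks and then feeds characters one at a time with rdline_char_in(), exactly as a socket or UART driver would. The names my_write, my_validate and my_complete are hypothetical.

#include <stdio.h>
#include "cmdline_rdline.h"

/* echo every character rdline wants displayed */
static int my_write(struct rdline *rdl, char c) { (void)rdl; return fputc(c, stdout); }

/* called when the user validates a complete line */
static void my_validate(struct rdline *rdl, const char *buf, unsigned int size)
{
	(void)rdl;
	printf("got %u bytes: %s", size, buf);
}

/* trivial completion: append "_done" when TAB is pressed (state == 0) */
static int my_complete(struct rdline *rdl, const char *buf, char *dst,
		       unsigned int dstsize, int *state)
{
	(void)rdl; (void)buf;
	if (*state != 0)	/* only complete on TAB, not on the help key */
		return 0;
	snprintf(dst, dstsize, "_done");
	return RDLINE_RES_COMPLETE;
}

int main(void)
{
	struct rdline rdl;
	const char *line = "hello\n";
	unsigned int i;

	rdline_init(&rdl, my_write, my_validate, my_complete);
	rdline_newline(&rdl, "example> ");

	/* feed characters one by one; '\n' triggers the validate callback */
	for (i = 0; line[i] != '\0'; i++) {
		if (rdline_char_in(&rdl, line[i]) == RDLINE_RES_VALIDATED)
			break;
	}
	return 0;
}
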
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "cmdline_parse.h" +#include "cmdline_rdline.h" +#include "cmdline_socket.h" +#include "cmdline.h" + +struct cmdline * +cmdline_file_new(cmdline_parse_ctx_t *ctx, const char *prompt, const char *path) +{ + int fd; + fd = open(path, O_RDONLY, 0); + if (fd < 0) { + dprintf("open() failed\n"); + return NULL; + } + return (cmdline_new(ctx, prompt, fd, -1)); +} + +struct cmdline * +cmdline_stdin_new(cmdline_parse_ctx_t *ctx, const char *prompt) +{ + struct cmdline *cl; +#ifdef RTE_EXEC_ENV_LINUXAPP + struct termios oldterm, term; + + tcgetattr(0, &oldterm); + memcpy(&term, &oldterm, sizeof(term)); + term.c_lflag &= ~(ICANON | ECHO | ISIG); + tcsetattr(0, TCSANOW, &term); + setbuf(stdin, NULL); +#endif + + cl = cmdline_new(ctx, prompt, 0, 1); + +#ifdef RTE_EXEC_ENV_LINUXAPP + memcpy(&cl->oldterm, &oldterm, sizeof(term)); +#endif + return cl; +} + +void +cmdline_stdin_exit(struct cmdline *cl) +{ +#ifdef RTE_EXEC_ENV_LINUXAPP + tcsetattr(fileno(stdin), TCSANOW, &cl->oldterm); +#else + /* silent the compiler */ + (void)cl; +#endif +} diff --git a/lib/librte_cmdline/cmdline_socket.h b/lib/librte_cmdline/cmdline_socket.h new file mode 100644 index 0000000000..368836e7dc --- /dev/null +++ b/lib/librte_cmdline/cmdline_socket.h @@ -0,0 +1,78 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CMDLINE_SOCKET_H_ +#define _CMDLINE_SOCKET_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +struct cmdline *cmdline_file_new(cmdline_parse_ctx_t *ctx, const char *prompt, const char *path); +struct cmdline *cmdline_stdin_new(cmdline_parse_ctx_t *ctx, const char *prompt); +void cmdline_stdin_exit(struct cmdline *cl); + +#ifdef __cplusplus +} +#endif + +#endif /* _CMDLINE_SOCKET_H_ */ diff --git a/lib/librte_cmdline/cmdline_vt100.c b/lib/librte_cmdline/cmdline_vt100.c new file mode 100644 index 0000000000..ebdc538fed --- /dev/null +++ b/lib/librte_cmdline/cmdline_vt100.c @@ -0,0 +1,182 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
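A hedged usage sketch for the cmdline_stdin_new()/cmdline_stdin_exit() pair declared above: main_ctx is a hypothetical command table built with the cmdline_parse API, and cmdline_interact() is assumed from cmdline.h (not shown in this hunk); neither name is introduced by this patch.

#include "cmdline_rdline.h"
#include "cmdline_parse.h"
#include "cmdline_socket.h"
#include "cmdline.h"

/* hypothetical table of commands; a real context ends with a NULL entry */
extern cmdline_parse_ctx_t main_ctx[];

int run_prompt(void)
{
	struct cmdline *cl;

	/* puts the terminal in raw mode and attaches the parser to stdin/stdout */
	cl = cmdline_stdin_new(main_ctx, "example> ");
	if (cl == NULL)
		return -1;

	/* loop on rdline until the user quits (assumed from cmdline.h) */
	cmdline_interact(cl);

	/* restore the termios settings saved by cmdline_stdin_new() */
	cmdline_stdin_exit(cl);
	return 0;
}
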
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "cmdline_vt100.h" + +const char *cmdline_vt100_commands[] = { + vt100_up_arr, + vt100_down_arr, + vt100_right_arr, + vt100_left_arr, + "\177", + "\n", + "\001", + "\005", + "\013", + "\031", + "\003", + "\006", + "\002", + vt100_suppr, + vt100_tab, + "\004", + "\014", + "\r", + "\033\177", + vt100_word_left, + vt100_word_right, + "?", + "\027", + "\020", + "\016", + "\033\144", +}; + +void +vt100_init(struct cmdline_vt100 *vt) +{ + vt->state = CMDLINE_VT100_INIT; +} + + +static int +match_command(char *buf, unsigned int size) +{ + const char *cmd; + size_t cmdlen; + unsigned int i = 0; + + for (i=0 ; ibufpos >= CMDLINE_VT100_BUF_SIZE) { + vt->state = CMDLINE_VT100_INIT; + vt->bufpos = 0; + } + + vt->buf[vt->bufpos++] = c; + size = vt->bufpos; + + switch (vt->state) { + case CMDLINE_VT100_INIT: + if (c == 033) { + vt->state = CMDLINE_VT100_ESCAPE; + } + else { + vt->bufpos = 0; + goto match_command; + } + break; + + case CMDLINE_VT100_ESCAPE: + if (c == 0133) { + vt->state = CMDLINE_VT100_ESCAPE_CSI; + } + else if (c >= 060 && c <= 0177) { /* XXX 0177 ? */ + vt->bufpos = 0; + vt->state = CMDLINE_VT100_INIT; + goto match_command; + } + break; + + case CMDLINE_VT100_ESCAPE_CSI: + if (c >= 0100 && c <= 0176) { + vt->bufpos = 0; + vt->state = CMDLINE_VT100_INIT; + goto match_command; + } + break; + + default: + vt->bufpos = 0; + break; + } + + return -2; + + match_command: + return match_command(vt->buf, size); +} diff --git a/lib/librte_cmdline/cmdline_vt100.h b/lib/librte_cmdline/cmdline_vt100.h new file mode 100644 index 0000000000..28d048a560 --- /dev/null +++ b/lib/librte_cmdline/cmdline_vt100.h @@ -0,0 +1,153 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 2009, Olivier MATZ + * All rights reserved. 
+ * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the University of California, Berkeley nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CMDLINE_VT100_H_ +#define _CMDLINE_VT100_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define vt100_bell "\007" +#define vt100_bs "\010" +#define vt100_bs_clear "\010 \010" +#define vt100_tab "\011" +#define vt100_crnl "\012\015" +#define vt100_clear_right "\033[0K" +#define vt100_clear_left "\033[1K" +#define vt100_clear_down "\033[0J" +#define vt100_clear_up "\033[1J" +#define vt100_clear_line "\033[2K" +#define vt100_clear_screen "\033[2J" +#define vt100_up_arr "\033\133\101" +#define vt100_down_arr "\033\133\102" +#define vt100_right_arr "\033\133\103" +#define vt100_left_arr "\033\133\104" +#define vt100_multi_right "\033\133%uC" +#define vt100_multi_left "\033\133%uD" +#define vt100_suppr "\033\133\063\176" +#define vt100_home "\033M\033E" +#define vt100_word_left "\033\142" +#define vt100_word_right "\033\146" + +/* Result of parsing : it must be synchronized with + * cmdline_vt100_commands[] in vt100.c */ +#define CMDLINE_KEY_UP_ARR 0 +#define CMDLINE_KEY_DOWN_ARR 1 +#define CMDLINE_KEY_RIGHT_ARR 2 +#define CMDLINE_KEY_LEFT_ARR 3 +#define CMDLINE_KEY_BKSPACE 4 +#define CMDLINE_KEY_RETURN 5 +#define CMDLINE_KEY_CTRL_A 6 +#define CMDLINE_KEY_CTRL_E 7 +#define CMDLINE_KEY_CTRL_K 8 +#define CMDLINE_KEY_CTRL_Y 9 +#define CMDLINE_KEY_CTRL_C 10 +#define CMDLINE_KEY_CTRL_F 11 +#define CMDLINE_KEY_CTRL_B 12 +#define CMDLINE_KEY_SUPPR 13 +#define CMDLINE_KEY_TAB 14 +#define CMDLINE_KEY_CTRL_D 15 +#define CMDLINE_KEY_CTRL_L 16 +#define CMDLINE_KEY_RETURN2 17 +#define CMDLINE_KEY_META_BKSPACE 18 +#define CMDLINE_KEY_WLEFT 19 +#define CMDLINE_KEY_WRIGHT 20 +#define CMDLINE_KEY_HELP 21 +#define CMDLINE_KEY_CTRL_W 22 +#define CMDLINE_KEY_CTRL_P 23 +#define CMDLINE_KEY_CTRL_N 24 +#define CMDLINE_KEY_META_D 25 + +extern const char *cmdline_vt100_commands[]; + +enum cmdline_vt100_parser_state { + CMDLINE_VT100_INIT, + CMDLINE_VT100_ESCAPE, + CMDLINE_VT100_ESCAPE_CSI +}; + +#define CMDLINE_VT100_BUF_SIZE 8 +struct cmdline_vt100 { + uint8_t bufpos; + char buf[CMDLINE_VT100_BUF_SIZE]; + enum 
cmdline_vt100_parser_state state; +}; + +/** + * Init + */ +void vt100_init(struct cmdline_vt100 *vt); + +/** + * Input a new character. + * Return -1 if the character is not part of a control sequence + * Return -2 if c is not the last char of a control sequence + * Else return the index in vt100_commands[] + */ +int vt100_parser(struct cmdline_vt100 *vt, char c); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile new file mode 100644 index 0000000000..d061060e20 --- /dev/null +++ b/lib/librte_eal/Makefile @@ -0,0 +1,41 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += common +DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += linuxapp +DIRS-$(CONFIG_RTE_LIBRTE_EAL_BAREMETAL) += baremetal +DIRS-$(CONFIG_RTE_LIBRTE_EAL_BAREMETAL) += common + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile new file mode 100644 index 0000000000..9a42bc76d5 --- /dev/null +++ b/lib/librte_eal/common/Makefile @@ -0,0 +1,56 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
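As a hedged illustration of the parser contract documented above (-1 for a plain character, -2 while a sequence is still incomplete, otherwise an index into cmdline_vt100_commands[]), an up-arrow escape sequence can be fed byte by byte as below; this sketch is not part of the patch.

#include <stdio.h>
#include "cmdline_vt100.h"

int main(void)
{
	struct cmdline_vt100 vt;
	const char *seq = vt100_up_arr;	/* ESC [ A, three bytes */
	int i, ret = -2;

	vt100_init(&vt);

	/* feed the escape sequence one byte at a time, as rdline does */
	for (i = 0; seq[i] != '\0'; i++)
		ret = vt100_parser(&vt, seq[i]);

	/* after the last byte the parser returns the matched key index */
	if (ret == CMDLINE_KEY_UP_ARR)
		printf("up arrow recognised\n");

	/* a printable character is not part of any control sequence */
	if (vt100_parser(&vt, 'a') == -1)
		printf("'a' is a plain character\n");
	return 0;
}
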
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +INC := rte_atomic.h rte_branch_prediction.h rte_byteorder.h rte_common.h +INC += rte_cycles.h rte_debug.h rte_eal.h rte_errno.h rte_launch.h rte_lcore.h +INC += rte_log.h rte_memcpy.h rte_memory.h rte_memzone.h rte_pci.h +INC += rte_pci_dev_ids.h rte_per_lcore.h rte_prefetch.h rte_random.h +INC += rte_rwlock.h rte_spinlock.h rte_tailq.h rte_interrupts.h rte_alarm.h +INC += rte_string_fns.h rte_cpuflags.h rte_version.h + +ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y) +INC += rte_warnings.h +endif + +ARCH_INC := rte_atomic.h + +SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC)) +SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/arch := \ + $(addprefix include/$(RTE_ARCH)/arch/,$(ARCH_INC)) + +# add libc if configured +DEPDIRS-$(CONFIG_RTE_LIBC) += lib/libc + +include $(RTE_SDK)/mk/rte.install.mk diff --git a/lib/librte_eal/common/eal_common_cpuflags.c b/lib/librte_eal/common/eal_common_cpuflags.c new file mode 100644 index 0000000000..54293e5b3d --- /dev/null +++ b/lib/librte_eal/common/eal_common_cpuflags.c @@ -0,0 +1,265 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ +#include +#include +#include +#include +#include + +/* + * This should prevent use of advanced instruction sets in this file. Otherwise + * the check function itself could cause a crash. + */ +#ifdef __INTEL_COMPILER +#pragma optimize ("", off) +#else +#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) +#if GCC_VERSION > 404000 +#pragma GCC optimize ("O0") +#endif +#endif + +/** + * Enumeration of CPU registers + */ +enum cpu_register_t { + REG_EAX = 0, + REG_EBX, + REG_ECX, + REG_EDX, +}; + +/** + * Parameters for CPUID instruction + */ +struct cpuid_parameters_t { + uint32_t eax; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; + enum cpu_register_t return_register; +}; + +#define CPU_FLAG_NAME_MAX_LEN 64 + +/** + * Struct to hold a processor feature entry + */ +struct feature_entry { + enum rte_cpu_flag_t feature; /**< feature name */ + char name[CPU_FLAG_NAME_MAX_LEN]; /**< String for printing */ + struct cpuid_parameters_t params; /**< cpuid parameters */ + uint32_t feature_mask; /**< bitmask for feature */ +}; + +#define FEAT_DEF(f) RTE_CPUFLAG_##f, #f + +/** + * An array that holds feature entries + */ +static const struct feature_entry cpu_feature_table[] = { + {FEAT_DEF(SSE3), {0x1, 0, 0, 0, REG_ECX}, 0x00000001}, + {FEAT_DEF(PCLMULQDQ), {0x1, 0, 0, 0, REG_ECX}, 0x00000002}, + {FEAT_DEF(DTES64), {0x1, 0, 0, 0, REG_ECX}, 0x00000004}, + {FEAT_DEF(MONITOR), {0x1, 0, 0, 0, REG_ECX}, 0x00000008}, + {FEAT_DEF(DS_CPL), {0x1, 0, 0, 0, REG_ECX}, 0x00000010}, + {FEAT_DEF(VMX), {0x1, 0, 0, 0, REG_ECX}, 0x00000020}, + {FEAT_DEF(SMX), {0x1, 0, 0, 0, REG_ECX}, 0x00000040}, + {FEAT_DEF(EIST), {0x1, 0, 0, 0, REG_ECX}, 0x00000080}, + {FEAT_DEF(TM2), {0x1, 0, 0, 0, REG_ECX}, 0x00000100}, + {FEAT_DEF(SSSE3), {0x1, 0, 0, 0, REG_ECX}, 0x00000200}, + {FEAT_DEF(CNXT_ID), {0x1, 0, 0, 0, REG_ECX}, 0x00000400}, + {FEAT_DEF(FMA), {0x1, 0, 0, 0, REG_ECX}, 0x00001000}, + {FEAT_DEF(CMPXCHG16B), {0x1, 0, 0, 0, REG_ECX}, 0x00002000}, + {FEAT_DEF(XTPR), {0x1, 0, 0, 0, REG_ECX}, 0x00004000}, + {FEAT_DEF(PDCM), {0x1, 0, 0, 0, REG_ECX}, 0x00008000}, + {FEAT_DEF(PCID), {0x1, 0, 0, 0, REG_ECX}, 0x00020000}, + {FEAT_DEF(DCA), {0x1, 0, 0, 0, REG_ECX}, 0x00040000}, + {FEAT_DEF(SSE4_1), {0x1, 0, 0, 0, REG_ECX}, 0x00080000}, + {FEAT_DEF(SSE4_2), {0x1, 0, 0, 0, REG_ECX}, 0x00100000}, + {FEAT_DEF(X2APIC), {0x1, 0, 0, 0, REG_ECX}, 0x00200000}, + {FEAT_DEF(MOVBE), {0x1, 0, 0, 0, REG_ECX}, 0x00400000}, + {FEAT_DEF(POPCNT), {0x1, 0, 0, 0, REG_ECX}, 0x00800000}, + {FEAT_DEF(TSC_DEADLINE), {0x1, 0, 0, 0, REG_ECX}, 0x01000000}, + {FEAT_DEF(AES), {0x1, 0, 0, 0, REG_ECX}, 0x02000000}, + {FEAT_DEF(XSAVE), {0x1, 0, 0, 0, REG_ECX}, 0x04000000}, + {FEAT_DEF(OSXSAVE), {0x1, 0, 0, 0, REG_ECX}, 0x08000000}, + {FEAT_DEF(AVX), {0x1, 0, 0, 0, REG_ECX}, 0x10000000}, + {FEAT_DEF(F16C), {0x1, 0, 0, 0, REG_ECX}, 0x20000000}, + {FEAT_DEF(RDRAND), {0x1, 0, 0, 0, REG_ECX}, 0x40000000}, + + {FEAT_DEF(FPU), {0x1, 0, 0, 0, REG_EDX}, 0x00000001}, + {FEAT_DEF(VME), {0x1, 0, 0, 0, REG_EDX}, 0x00000002}, + {FEAT_DEF(DE), {0x1, 0, 0, 0, REG_EDX}, 0x00000004}, + {FEAT_DEF(PSE), {0x1, 0, 0, 0, REG_EDX}, 0x00000008}, + {FEAT_DEF(TSC), {0x1, 0, 0, 0, REG_EDX}, 0x00000010}, + {FEAT_DEF(MSR), {0x1, 0, 0, 0, REG_EDX}, 0x00000020}, + {FEAT_DEF(PAE), {0x1, 0, 0, 0, REG_EDX}, 0x00000040}, + {FEAT_DEF(MCE), {0x1, 0, 0, 0, REG_EDX}, 0x00000080}, + {FEAT_DEF(CX8), {0x1, 0, 0, 0, REG_EDX}, 0x00000100}, + {FEAT_DEF(APIC), {0x1, 0, 0, 0, REG_EDX}, 0x00000200}, + {FEAT_DEF(SEP), {0x1, 0, 0, 0, REG_EDX}, 
0x00000800}, + {FEAT_DEF(MTRR), {0x1, 0, 0, 0, REG_EDX}, 0x00001000}, + {FEAT_DEF(PGE), {0x1, 0, 0, 0, REG_EDX}, 0x00002000}, + {FEAT_DEF(MCA), {0x1, 0, 0, 0, REG_EDX}, 0x00004000}, + {FEAT_DEF(CMOV), {0x1, 0, 0, 0, REG_EDX}, 0x00008000}, + {FEAT_DEF(PAT), {0x1, 0, 0, 0, REG_EDX}, 0x00010000}, + {FEAT_DEF(PSE36), {0x1, 0, 0, 0, REG_EDX}, 0x00020000}, + {FEAT_DEF(PSN), {0x1, 0, 0, 0, REG_EDX}, 0x00040000}, + {FEAT_DEF(CLFSH), {0x1, 0, 0, 0, REG_EDX}, 0x00080000}, + {FEAT_DEF(DS), {0x1, 0, 0, 0, REG_EDX}, 0x00200000}, + {FEAT_DEF(ACPI), {0x1, 0, 0, 0, REG_EDX}, 0x00400000}, + {FEAT_DEF(MMX), {0x1, 0, 0, 0, REG_EDX}, 0x00800000}, + {FEAT_DEF(FXSR), {0x1, 0, 0, 0, REG_EDX}, 0x01000000}, + {FEAT_DEF(SSE), {0x1, 0, 0, 0, REG_EDX}, 0x02000000}, + {FEAT_DEF(SSE2), {0x1, 0, 0, 0, REG_EDX}, 0x04000000}, + {FEAT_DEF(SS), {0x1, 0, 0, 0, REG_EDX}, 0x08000000}, + {FEAT_DEF(HTT), {0x1, 0, 0, 0, REG_EDX}, 0x10000000}, + {FEAT_DEF(TM), {0x1, 0, 0, 0, REG_EDX}, 0x20000000}, + {FEAT_DEF(PBE), {0x1, 0, 0, 0, REG_EDX}, 0x80000000}, + + {FEAT_DEF(DIGTEMP), {0x6, 0, 0, 0, REG_EAX}, 0x00000001}, + {FEAT_DEF(TRBOBST), {0x6, 0, 0, 0, REG_EAX}, 0x00000002}, + {FEAT_DEF(ARAT), {0x6, 0, 0, 0, REG_EAX}, 0x00000004}, + {FEAT_DEF(PLN), {0x6, 0, 0, 0, REG_EAX}, 0x00000010}, + {FEAT_DEF(ECMD), {0x6, 0, 0, 0, REG_EAX}, 0x00000020}, + {FEAT_DEF(PTM), {0x6, 0, 0, 0, REG_EAX}, 0x00000040}, + + {FEAT_DEF(MPERF_APERF_MSR), {0x6, 0, 0, 0, REG_ECX}, 0x00000001}, + {FEAT_DEF(ACNT2), {0x6, 0, 0, 0, REG_ECX}, 0x00000002}, + {FEAT_DEF(ENERGY_EFF), {0x6, 0, 0, 0, REG_ECX}, 0x00000008}, + + {FEAT_DEF(FSGSBASE), {0x7, 0, 0, 0, REG_EBX}, 0x00000001}, + {FEAT_DEF(BMI1), {0x7, 0, 0, 0, REG_EBX}, 0x00000004}, + {FEAT_DEF(AVX2), {0x7, 0, 0, 0, REG_EBX}, 0x00000010}, + {FEAT_DEF(SMEP), {0x7, 0, 0, 0, REG_EBX}, 0x00000040}, + {FEAT_DEF(BMI2), {0x7, 0, 0, 0, REG_EBX}, 0x00000080}, + {FEAT_DEF(ERMS), {0x7, 0, 0, 0, REG_EBX}, 0x00000100}, + {FEAT_DEF(INVPCID), {0x7, 0, 0, 0, REG_EBX}, 0x00000400}, + + {FEAT_DEF(LAHF_SAHF), {0x80000001, 0, 0, 0, REG_ECX}, 0x00000001}, + {FEAT_DEF(LZCNT), {0x80000001, 0, 0, 0, REG_ECX}, 0x00000010}, + + {FEAT_DEF(SYSCALL), {0x80000001, 0, 0, 0, REG_EDX}, 0x00000800}, + {FEAT_DEF(XD), {0x80000001, 0, 0, 0, REG_EDX}, 0x00100000}, + {FEAT_DEF(1GB_PG), {0x80000001, 0, 0, 0, REG_EDX}, 0x04000000}, + {FEAT_DEF(RDTSCP), {0x80000001, 0, 0, 0, REG_EDX}, 0x08000000}, + {FEAT_DEF(EM64T), {0x80000001, 0, 0, 0, REG_EDX}, 0x20000000}, + + {FEAT_DEF(INVTSC), {0x80000007, 0, 0, 0, REG_EDX}, 0x00000100}, +}; + +/* + * Execute CPUID instruction and get contents of a specific register + * + * This function, when compiled with GCC, will generate architecture-neutral + * code, as per GCC manual. + */ +static inline int +rte_cpu_get_features(struct cpuid_parameters_t params) +{ + int eax, ebx, ecx, edx; /* registers */ + + asm volatile ("cpuid" + /* output */ + : "=a" (eax), + "=b" (ebx), + "=c" (ecx), + "=d" (edx) + /* input */ + : "a" (params.eax), + "b" (params.ebx), + "c" (params.ecx), + "d" (params.edx)); + + switch (params.return_register) { + case REG_EAX: + return eax; + case REG_EBX: + return ebx; + case REG_ECX: + return ecx; + case REG_EDX: + return edx; + default: + return 0; + } +} + +/* + * Checks if a particular flag is available on current machine. 
+ */ +int +rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature) +{ + int value; + + if (feature >= RTE_CPUFLAG_NUMFLAGS) + /* Flag does not match anything in the feature tables */ + return -ENOENT; + + /* get value of the register containing the desired feature */ + value = rte_cpu_get_features(cpu_feature_table[feature].params); + + /* check if the feature is enabled */ + return (cpu_feature_table[feature].feature_mask & value) > 0; +} + +/** + * Checks if the machine is adequate for running the binary. If it is not, the + * program exits with status 1. + * The function attribute forces this function to be called before main(). But + * with ICC, the check is generated by the compiler. + */ +#ifndef __INTEL_COMPILER +static void __attribute__ ((__constructor__)) +rte_cpu_check_supported(void) +{ + /* This is generated at compile-time by the build system */ + static const enum rte_cpu_flag_t compile_time_flags[] = { + RTE_COMPILE_TIME_CPUFLAGS + }; + unsigned i; + + for (i = 0; i < sizeof(compile_time_flags)/sizeof(compile_time_flags[0]); i++) + if (rte_cpu_get_flag_enabled(compile_time_flags[i]) < 1) { + fprintf(stderr, + "ERROR: This system does not support \"%s\".\n" + "Please check that RTE_MACHINE is set correctly.\n", + cpu_feature_table[compile_time_flags[i]].name); + exit(1); + } +} +#endif diff --git a/lib/librte_eal/common/eal_common_errno.c b/lib/librte_eal/common/eal_common_errno.c new file mode 100644 index 0000000000..9ed45e57b4 --- /dev/null +++ b/lib/librte_eal/common/eal_common_errno.c @@ -0,0 +1,72 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
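A short, hedged sketch of checking one of the flags from the feature table above at run time; RTE_CPUFLAG_SSE4_2 and rte_cpu_get_flag_enabled() come from rte_cpuflags.h, which the common Makefile above installs with the other EAL headers.

#include <stdio.h>
#include <rte_cpuflags.h>

int main(void)
{
	/* returns 1 if the flag is set, 0 if clear, -ENOENT for an unknown flag */
	int ret = rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2);

	if (ret < 0)
		printf("flag not known to this EAL build\n");
	else
		printf("SSE4.2 %s\n", ret ? "supported" : "not supported");
	return 0;
}
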
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +RTE_DEFINE_PER_LCORE(int, _rte_errno); + +const char * +rte_strerror(int errnum) +{ +#define RETVAL_SZ 256 + static RTE_DEFINE_PER_LCORE(char[RETVAL_SZ], retval); + + /* since some implementations of strerror_r throw an error + * themselves if errnum is too big, we handle that case here */ + if (errnum > RTE_MAX_ERRNO) + rte_snprintf(RTE_PER_LCORE(retval), RETVAL_SZ, + "Unknown error %d", errnum); + else + switch (errnum){ + case E_RTE_SECONDARY: + return "Invalid call in secondary process"; + case E_RTE_NO_CONFIG: + return "Missing rte_config structure"; + case E_RTE_NO_TAILQ: + return "No TAILQ initialised"; + default: + strerror_r(errnum, RTE_PER_LCORE(retval), RETVAL_SZ); + } + + return RTE_PER_LCORE(retval); +} diff --git a/lib/librte_eal/common/eal_common_launch.c b/lib/librte_eal/common/eal_common_launch.c new file mode 100644 index 0000000000..deef8e8d30 --- /dev/null +++ b/lib/librte_eal/common/eal_common_launch.c @@ -0,0 +1,122 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Wait until a lcore finished its job. + */ +int +rte_eal_wait_lcore(unsigned slave_id) +{ + if (lcore_config[slave_id].state == WAIT) + return 0; + + while (lcore_config[slave_id].state != WAIT && + lcore_config[slave_id].state != FINISHED); + + rte_rmb(); + + /* we are in finished state, go to wait state */ + lcore_config[slave_id].state = WAIT; + return lcore_config[slave_id].ret; +} + +/* + * Check that every SLAVE lcores are in WAIT state, then call + * rte_eal_remote_launch() for all of them. If call_master is true + * (set to CALL_MASTER), also call the function on the master lcore. 
+ */ +int +rte_eal_mp_remote_launch(int (*f)(void *), void *arg, + enum rte_rmt_call_master_t call_master) +{ + int lcore_id; + int master = rte_get_master_lcore(); + + /* check state of lcores */ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (lcore_config[lcore_id].state != WAIT) + return -EBUSY; + } + + /* send messages to cores */ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_remote_launch(f, arg, lcore_id); + } + + if (call_master == CALL_MASTER) { + lcore_config[master].ret = f(arg); + lcore_config[master].state = FINISHED; + } + + return 0; +} + +/* + * Return the state of the lcore identified by slave_id. + */ +enum rte_lcore_state_t +rte_eal_get_lcore_state(unsigned lcore_id) +{ + return lcore_config[lcore_id].state; +} + +/* + * Do a rte_eal_wait_lcore() for every lcore. The return values are + * ignored. + */ +void +rte_eal_mp_wait_lcore(void) +{ + unsigned lcore_id; + + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_wait_lcore(lcore_id); + } +} + diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c new file mode 100644 index 0000000000..1362109d3f --- /dev/null +++ b/lib/librte_eal/common/eal_common_log.c @@ -0,0 +1,390 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" + +#define LOG_ELT_SIZE 2048 + +#define LOG_HISTORY_MP_NAME "log_history" + +STAILQ_HEAD(log_history_list, log_history); + +/** + * The structure of a message log in the log history. 
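A hedged sketch of the launch/wait pattern implemented above: run a hypothetical worker on the master and every slave lcore with rte_eal_mp_remote_launch(), then collect the slaves with rte_eal_mp_wait_lcore(). The worker function and its output are illustrative only.

#include <stdio.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>

/* hypothetical per-lcore worker: just report which lcore runs it */
static int worker(void *arg)
{
	(void)arg;
	printf("hello from lcore %u\n", rte_lcore_id());
	return 0;
}

int main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* requires every slave lcore to be in WAIT state; -EBUSY otherwise */
	if (rte_eal_mp_remote_launch(worker, NULL, CALL_MASTER) != 0)
		return -1;

	/* wait for all slaves and move them back to WAIT */
	rte_eal_mp_wait_lcore();
	return 0;
}
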
+ */ +struct log_history { + STAILQ_ENTRY(log_history) next; + unsigned size; + char buf[0]; +}; + +static struct rte_mempool *log_history_mp = NULL; +static unsigned log_history_size = 0; +static struct log_history_list log_history; + +/* global log structure */ +struct rte_logs rte_logs = { + .type = ~0, + .level = RTE_LOG_DEBUG, + .file = NULL, +}; + +static rte_spinlock_t log_dump_lock = RTE_SPINLOCK_INITIALIZER; +static rte_spinlock_t log_list_lock = RTE_SPINLOCK_INITIALIZER; +static FILE *default_log_stream; +static int history_enabled = 1; + +/** + * This global structure stores some informations about the message + * that is currently beeing processed by one lcore + */ +struct log_cur_msg { + uint32_t loglevel; /**< log level - see rte_log.h */ + uint32_t logtype; /**< log type - see rte_log.h */ +} __rte_cache_aligned; +static struct log_cur_msg log_cur_msg[RTE_MAX_LCORE]; /**< per core log */ + +/* early logs */ + +/* + * early log function, used during boot when mempool (hence log + * history) is not available + */ +static ssize_t +early_log_write(__attribute__((unused)) void *c, const char *buf, size_t size) +{ + ssize_t ret; + ret = fwrite(buf, size, 1, stdout); + fflush(stdout); + if (ret == 0) + return -1; + return ret; +} + +static ssize_t +early_log_read(__attribute__((unused)) void *c, + __attribute__((unused)) char *buf, + __attribute__((unused)) size_t size) +{ + return 0; +} + +/* + * this is needed because cookies_io_functions_t has a different + * prototype between newlib and glibc + */ +#ifdef RTE_EXEC_ENV_LINUXAPP +static int +early_log_seek(__attribute__((unused)) void *c, + __attribute__((unused)) off64_t *offset, + __attribute__((unused)) int whence) +{ + return -1; +} +#else +static int +early_log_seek(__attribute__((unused)) void *c, + __attribute__((unused)) _off_t *offset, + __attribute__((unused)) int whence) +{ + return -1; +} +#endif + +static int +early_log_close(__attribute__((unused)) void *c) +{ + return 0; +} + +static cookie_io_functions_t early_log_func = { + .read = early_log_read, + .write = early_log_write, + .seek = early_log_seek, + .close = early_log_close +}; +static FILE *early_log_stream; + +/* default logs */ + +int +rte_log_add_in_history(const char *buf, size_t size) +{ + struct log_history *hist_buf = NULL; + void *obj; + + if (history_enabled == 0) + return 0; + + rte_spinlock_lock(&log_list_lock); + + /* get a buffer for adding in history */ + if (log_history_size > RTE_LOG_HISTORY) { + hist_buf = STAILQ_FIRST(&log_history); + STAILQ_REMOVE_HEAD(&log_history, next); + } + else { + if (rte_mempool_mc_get(log_history_mp, &obj) < 0) + obj = NULL; + hist_buf = obj; + } + + /* no buffer */ + if (hist_buf == NULL) { + rte_spinlock_unlock(&log_list_lock); + return -ENOBUFS; + } + + /* not enough room for msg, buffer go back in mempool */ + if (size >= (LOG_ELT_SIZE - sizeof(*hist_buf))) { + rte_mempool_mp_put(log_history_mp, hist_buf); + rte_spinlock_unlock(&log_list_lock); + return -ENOBUFS; + } + + /* add in history */ + memcpy(hist_buf->buf, buf, size); + hist_buf->buf[LOG_ELT_SIZE-1] = '\0'; + hist_buf->size = size; + STAILQ_INSERT_TAIL(&log_history, hist_buf, next); + rte_spinlock_unlock(&log_list_lock); + + return 0; +} + +void +rte_log_set_history(int enable) +{ + history_enabled = enable; +} + +/* Change the stream that will be used by logging system */ +int +rte_openlog_stream(FILE *f) +{ + if (f == NULL) + rte_logs.file = default_log_stream; + else + rte_logs.file = f; + return 0; +} + +/* Set global log level */ +void 
+rte_set_log_level(uint32_t level) +{ + rte_logs.level = (uint32_t)level; +} + +/* Set global log type */ +void +rte_set_log_type(uint32_t type, int enable) +{ + if (enable) + rte_logs.type |= type; + else + rte_logs.type &= (~type); +} + +/* get the current loglevel for the message being processed */ +int rte_log_cur_msg_loglevel(void) +{ + unsigned lcore_id; + lcore_id = rte_lcore_id(); + return log_cur_msg[lcore_id].loglevel; +} + +/* get the current logtype for the message being processed */ +int rte_log_cur_msg_logtype(void) +{ + unsigned lcore_id; + lcore_id = rte_lcore_id(); + return log_cur_msg[lcore_id].logtype; +} + +/* Dump log history on console */ +void +rte_log_dump_history(void) +{ + struct log_history_list tmp_log_history; + struct log_history *hist_buf; + unsigned i; + + /* only one dump at a time */ + rte_spinlock_lock(&log_dump_lock); + + /* save list, and re-init to allow logging during dump */ + rte_spinlock_lock(&log_list_lock); + tmp_log_history = log_history; + STAILQ_INIT(&log_history); + rte_spinlock_unlock(&log_list_lock); + + /* pop each saved message and write it on stdout */ + for (i=0; i<RTE_LOG_HISTORY; i++) { + hist_buf = STAILQ_FIRST(&tmp_log_history); + if (hist_buf == NULL) + break; + STAILQ_REMOVE_HEAD(&tmp_log_history, next); + if (fwrite(hist_buf->buf, hist_buf->size, 1, stdout) == 0) { + rte_mempool_mp_put(log_history_mp, hist_buf); + break; + } + + /* put back message structure in pool */ + rte_mempool_mp_put(log_history_mp, hist_buf); + } + fflush(stdout); + + rte_spinlock_unlock(&log_dump_lock); +} + +/* + * Generates a log message. The message will be sent in the stream + * defined by the previous call to rte_openlog_stream(). + */ +int +rte_vlog(__attribute__((unused)) uint32_t level, + __attribute__((unused)) uint32_t logtype, + const char *format, va_list ap) +{ + int ret; + FILE *f = rte_logs.file; + unsigned lcore_id; + + /* save loglevel and logtype in a global per-lcore variable */ + lcore_id = rte_lcore_id(); + log_cur_msg[lcore_id].loglevel = level; + log_cur_msg[lcore_id].logtype = logtype; + + ret = vfprintf(f, format, ap); + fflush(f); + return ret; +} + +/* + * Generates a log message. The message will be sent in the stream + * defined by the previous call to rte_openlog_stream(). + */ +int +rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
+{ + va_list ap; + int ret; + + va_start(ap, format); + ret = rte_vlog(level, logtype, format, ap); + va_end(ap); + return ret; +} + +/* + * init the log library, called by rte_eal_init() to enable early + * logs + */ +int +rte_eal_log_early_init(void) +{ + early_log_stream = fopencookie(NULL, "w+", early_log_func); + if (early_log_stream == NULL) { + printf("Cannot configure early_log_stream\n"); + return -1; + } + rte_openlog_stream(early_log_stream); + return 0; +} + +/* + * called by environment-specific log init function to initialize log + * history + */ +int +rte_eal_common_log_init(FILE *default_log) +{ + STAILQ_INIT(&log_history); + + /* reserve RTE_LOG_HISTORY*2 elements, so we can dump and + * keep logging during this time */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + log_history_mp = rte_mempool_create(LOG_HISTORY_MP_NAME, RTE_LOG_HISTORY*2, + LOG_ELT_SIZE, 0, 0, + NULL, NULL, + NULL, NULL, + SOCKET_ID_ANY, 0); + else + log_history_mp = rte_mempool_lookup(LOG_HISTORY_MP_NAME); + if (log_history_mp == NULL) { + RTE_LOG(ERR, EAL, "%s(): cannot create log_history mempool\n", + __func__); + return -1; + } + + default_log_stream = default_log; + rte_openlog_stream(default_log); + return 0; +} + diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c new file mode 100644 index 0000000000..448639e4bb --- /dev/null +++ b/lib/librte_eal/common/eal_common_memory.c @@ -0,0 +1,116 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "eal_private.h" + +/* + * Return a pointer to a read-only table of struct rte_physmem_desc + * elements, containing the layout of all addressable physical + * memory. The last element of the table contains a NULL address. 
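The logging path set up above (rte_eal_common_log_init(), rte_openlog_stream(), rte_log()) is normally driven from application code roughly as follows. This is a hypothetical sketch: the RTE_LOG() macro and the RTE_LOG_*/RTE_LOGTYPE_* constants are assumed to come from rte_log.h, which is not part of this hunk.

    #include <stdio.h>
    #include <rte_log.h>

    static void
    setup_logging(void)
    {
            /* redirect all subsequent log output to a regular file */
            FILE *f = fopen("/tmp/app.log", "w+");
            if (f != NULL)
                    rte_openlog_stream(f);

            /* raise the global threshold checked by the RTE_LOG() macro */
            rte_set_log_level(RTE_LOG_INFO);

            /* expands to rte_log() with the level/type constants filled in */
            RTE_LOG(INFO, USER1, "logging initialised\n");
    }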
+ */ +const struct rte_memseg * +rte_eal_get_physmem_layout(void) +{ + return rte_eal_get_configuration()->mem_config->memseg; +} + + +/* get the total size of memory */ +uint64_t +rte_eal_get_physmem_size(void) +{ + const struct rte_mem_config *mcfg; + unsigned i = 0; + uint64_t total_len = 0; + + /* get pointer to global configuration */ + mcfg = rte_eal_get_configuration()->mem_config; + + for (i=0; imemseg[i].addr == NULL) + break; + + total_len += mcfg->memseg[i].len; + } + + return total_len; +} + +/* Dump the physical memory layout on console */ +void +rte_dump_physmem_layout(void) +{ + const struct rte_mem_config *mcfg; + unsigned i = 0; + + /* get pointer to global configuration */ + mcfg = rte_eal_get_configuration()->mem_config; + + for (i=0; imemseg[i].addr == NULL) + break; + printf("phys:0x%"PRIx64", len:0x%"PRIx64", virt:%p, " + "socket_id:%"PRId32"\n", + mcfg->memseg[i].phys_addr, + mcfg->memseg[i].len, + mcfg->memseg[i].addr, + mcfg->memseg[i].socket_id); + } +} + +/* return the number of memory channels */ +unsigned rte_memory_get_nchannel(void) +{ + return rte_eal_get_configuration()->mem_config->nchannel; +} + +/* return the number of memory rank */ +unsigned rte_memory_get_nrank(void) +{ + return rte_eal_get_configuration()->mem_config->nrank; +} diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c new file mode 100644 index 0000000000..dae4ea074e --- /dev/null +++ b/lib/librte_eal/common/eal_common_memzone.c @@ -0,0 +1,376 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
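As an illustration of the accessors above, a hypothetical helper that walks the memseg table and prints the totals (RTE_MAX_MEMSEG is the build-configuration constant also used by the memzone code below):

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_memory.h>

    static void
    show_physmem(void)
    {
            const struct rte_memseg *ms = rte_eal_get_physmem_layout();
            unsigned i;

            /* the table is terminated by an entry whose addr is NULL */
            for (i = 0; i < RTE_MAX_MEMSEG && ms[i].addr != NULL; i++)
                    printf("segment %u: len=0x%"PRIx64" socket_id=%"PRId32"\n",
                           i, ms[i].len, ms[i].socket_id);

            printf("total: %"PRIu64" bytes\n", rte_eal_get_physmem_size());
    }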
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" + +/* internal copy of free memory segments */ +static struct rte_memseg free_memseg[RTE_MAX_MEMSEG]; + +/* pointer to last reserved memzone */ +static unsigned memzone_idx; + +/* + * Return a pointer to a correctly filled memzone descriptor. If the + * allocation cannot be done, return NULL. + */ +const struct rte_memzone * +rte_memzone_reserve(const char *name, uint64_t len, int socket_id, + unsigned flags) +{ + return rte_memzone_reserve_aligned(name, + len, socket_id, flags, CACHE_LINE_SIZE); +} + +/* + * Return a pointer to a correctly filled memzone descriptor (with a + * specified alignment). If the allocation cannot be done, return NULL. + */ +const struct rte_memzone * +rte_memzone_reserve_aligned(const char *name, uint64_t len, + int socket_id, unsigned flags, unsigned align) +{ + struct rte_config *config; + unsigned i = 0; + int memseg_idx = -1; + uint64_t requested_len; + uint64_t memseg_len = 0; + phys_addr_t memseg_physaddr; + void *memseg_addr; + uintptr_t addr_offset; + + /* if secondary processes return error */ + if (rte_eal_process_type() == RTE_PROC_SECONDARY){ + RTE_LOG(ERR, EAL, "%s(): Not allowed in secondary process\n", __func__); + rte_errno = E_RTE_SECONDARY; + return NULL; + } + + /* if alignment is not a power of two */ + if (!rte_is_power_of_2(align)) { + RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__, + align); + rte_errno = EINVAL; + return NULL; + } + + /* alignment less than cache size is not allowed */ + if (align < CACHE_LINE_SIZE) + align = CACHE_LINE_SIZE; + + /* get pointer to global configuration */ + config = rte_eal_get_configuration(); + + /* no more room in config */ + if (memzone_idx >= RTE_MAX_MEMZONE) { + RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__); + rte_errno = ENOSPC; + return NULL; + } + + /* both sizes cannot be explicitly called for */ + if ((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB)) { + rte_errno = EINVAL; + return NULL; + } + + /* zone already exist */ + if (rte_memzone_lookup(name) != NULL) { + RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n", + __func__, name); + rte_errno = EEXIST; + return NULL; + } + + /* align length on cache boundary */ + len += CACHE_LINE_MASK; + len &= ~((uint64_t)CACHE_LINE_MASK); + + + + /* save requested length */ + requested_len = len; + + /* reserve extra space for future alignment */ + if (len) + len += align; + + /* find the smallest segment matching requirements */ + for (i = 0; i < RTE_MAX_MEMSEG; i++) { + + /* last segment */ + if (free_memseg[i].addr == NULL) + break; + + /* empty segment, skip it */ + if (free_memseg[i].len == 0) + continue; + + /* bad socket ID */ + if (socket_id != SOCKET_ID_ANY && + socket_id != free_memseg[i].socket_id) + continue; + + /* check len */ + if (len != 0 && len > free_memseg[i].len) + continue; + + /* check flags for hugepage sizes */ + if ((flags & RTE_MEMZONE_2MB) && + free_memseg[i].hugepage_sz == RTE_PGSIZE_1G ) + continue; + if ((flags & RTE_MEMZONE_1GB) && + free_memseg[i].hugepage_sz == RTE_PGSIZE_2M ) + continue; + + /* this segment is the best until now */ + if (memseg_idx == -1) { + memseg_idx = i; + memseg_len = free_memseg[i].len; + } + /* find the biggest contiguous zone */ + else if (len == 0) { + if (free_memseg[i].len > memseg_len) { + memseg_idx = i; + memseg_len 
= free_memseg[i].len; + } + } + /* + * find the smallest (we already checked that current + * zone length is > len + */ + else if (free_memseg[i].len < memseg_len) { + memseg_idx = i; + memseg_len = free_memseg[i].len; + } + } + + /* no segment found */ + if (memseg_idx == -1) { + /* + * If RTE_MEMZONE_SIZE_HINT_ONLY flag is specified, + * try allocating again without the size parameter otherwise -fail. + */ + if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY) && + ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB))) + return rte_memzone_reserve_aligned(name, len - align, + socket_id, 0, align); + + RTE_LOG(ERR, EAL, "%s(): No appropriate segment found\n", __func__); + rte_errno = ENOMEM; + return NULL; + } + + /* get offset needed to adjust alignment */ + addr_offset = (uintptr_t) RTE_PTR_SUB( + RTE_ALIGN_CEIL(free_memseg[memseg_idx].addr, (uintptr_t) align), + (uintptr_t) free_memseg[memseg_idx].addr); + + /* save aligned physical and virtual addresses */ + memseg_physaddr = free_memseg[memseg_idx].phys_addr + addr_offset; + memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr, addr_offset); + + /* if we are looking for a biggest memzone */ + if (requested_len == 0) + requested_len = memseg_len - addr_offset; + + /* set length to correct value */ + len = addr_offset + requested_len; + + /* update our internal state */ + free_memseg[memseg_idx].len -= len; + free_memseg[memseg_idx].phys_addr += len; + free_memseg[memseg_idx].addr = + (char *)free_memseg[memseg_idx].addr + len; + + /* fill the zone in config */ + struct rte_memzone *mz = &config->mem_config->memzone[memzone_idx++]; + rte_snprintf(mz->name, sizeof(mz->name), "%s", name); + mz->phys_addr = memseg_physaddr; + mz->addr = memseg_addr; + mz->len = requested_len; + mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz; + mz->socket_id = free_memseg[memseg_idx].socket_id; + mz->flags = 0; + + return mz; +} + +/* + * Lookup for the memzone identified by the given name + */ +const struct rte_memzone * +rte_memzone_lookup(const char *name) +{ + const struct rte_mem_config *mcfg; + unsigned i = 0; + + /* get pointer to global configuration */ + mcfg = rte_eal_get_configuration()->mem_config; + + /* + * the algorithm is not optimal (linear), but there are few + * zones and this function should be called at init only + */ + for (i = 0; i < RTE_MAX_MEMZONE && mcfg->memzone[i].addr != NULL; i++) { + if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE)) + return &mcfg->memzone[i]; + } + return NULL; +} + +/* Dump all reserved memory zones on console */ +void +rte_memzone_dump(void) +{ + const struct rte_mem_config *mcfg; + unsigned i = 0; + + /* get pointer to global configuration */ + mcfg = rte_eal_get_configuration()->mem_config; + + /* dump all zones */ + for (i=0; imemzone[i].addr == NULL) + break; + printf("name:<%s>, phys:0x%"PRIx64", len:0x%"PRIx64"" + ", virt:%p, socket_id:%"PRId32"\n", + mcfg->memzone[i].name, + mcfg->memzone[i].phys_addr, + mcfg->memzone[i].len, + mcfg->memzone[i].addr, + mcfg->memzone[i].socket_id); + } +} + +/* + * called by init: modify the free memseg list to have cache-aligned + * addresses and cache-aligned lengths + */ +static int +memseg_sanitize(struct rte_memseg *memseg) +{ + unsigned phys_align; + unsigned virt_align; + unsigned off; + + phys_align = memseg->phys_addr & CACHE_LINE_MASK; + virt_align = (unsigned long)memseg->addr & CACHE_LINE_MASK; + + /* + * sanity check: phys_addr and addr must have the same + * alignment + */ + if (phys_align != virt_align) + return -1; + + /* memseg is 
really too small, don't bother with it */ + if (memseg->len < (2 * CACHE_LINE_SIZE)) { + memseg->len = 0; + return 0; + } + + /* align start address */ + off = (CACHE_LINE_SIZE - phys_align) & CACHE_LINE_MASK; + memseg->phys_addr += off; + memseg->addr = (char *)memseg->addr + off; + memseg->len -= off; + + /* align end address */ + memseg->len &= ~((uint64_t)CACHE_LINE_MASK); + + return 0; +} + +/* + * Init the memzone subsystem + */ +int +rte_eal_memzone_init(void) +{ + struct rte_config *config; + const struct rte_memseg *memseg; + unsigned i = 0; + + /* secondary processes don't need to initialise anything */ + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return 0; + + /* get pointer to global configuration */ + config = rte_eal_get_configuration(); + + memseg = rte_eal_get_physmem_layout(); + if (memseg == NULL) { + RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__); + return -1; + } + + /* duplicate the memsegs from config */ + memcpy(free_memseg, memseg, sizeof(free_memseg)); + + /* make all zones cache-aligned */ + for (i=0; imem_config->memzone, 0, sizeof(config->mem_config->memzone)); + + return 0; +} diff --git a/lib/librte_eal/common/eal_common_pci.c b/lib/librte_eal/common/eal_common_pci.c new file mode 100644 index 0000000000..fe2426509f --- /dev/null +++ b/lib/librte_eal/common/eal_common_pci.c @@ -0,0 +1,145 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
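A short, hypothetical sketch of how the reserve/lookup pair defined above is typically used: the primary process reserves a named zone, and secondary processes (or later callers) find it again by name.

    #include <rte_memzone.h>
    #include <rte_memory.h>

    static void *
    get_shared_area(void)
    {
            const struct rte_memzone *mz;

            /* reserve 1 MB, cache-aligned, on any NUMA socket */
            mz = rte_memzone_reserve("app_shared_area", 1024 * 1024,
                                     SOCKET_ID_ANY, 0);

            /* in a secondary process, or if it already exists, look it up */
            if (mz == NULL)
                    mz = rte_memzone_lookup("app_shared_area");

            if (mz == NULL)
                    return NULL;    /* no free slot or no suitable memseg */

            return mz->addr;
    }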
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" + +struct pci_driver_list driver_list; +struct pci_device_list device_list; + +static struct rte_pci_addr *dev_blacklist = NULL; +static unsigned dev_blacklist_size = 0; + +static int is_blacklisted(struct rte_pci_device *dev) +{ + struct rte_pci_addr *loc = &dev->addr; + unsigned i; + + for (i = 0; i < dev_blacklist_size; i++) { + if ((loc->domain == dev_blacklist[i].domain) && + (loc->bus == dev_blacklist[i].bus) && + (loc->devid == dev_blacklist[i].devid) && + (loc->function == dev_blacklist[i].function)) { + return 1; + } + } + + return 0; /* not in blacklist */ +} + +/* + * If vendor/device ID match, call the devinit() function of all + * registered driver for the given device. Return -1 if no driver is + * found for this device. + */ +static int +pci_probe_all_drivers(struct rte_pci_device *dev) +{ + struct rte_pci_driver *dr = NULL; + + TAILQ_FOREACH(dr, &driver_list, next) { + if (is_blacklisted(dev)) + return -1; + if (rte_eal_pci_probe_one_driver(dr, dev) == 0) + return 0; + } + return -1; +} + +/* + * Scan the content of the PCI bus, and call the devinit() function for + * all registered drivers that have a matching entry in its id_table + * for discovered devices. + */ +int +rte_eal_pci_probe(void) +{ + struct rte_pci_device *dev = NULL; + + TAILQ_FOREACH(dev, &device_list, next) + pci_probe_all_drivers(dev); + + return 0; +} + +/* dump one device */ +static int +pci_dump_one_device(struct rte_pci_device *dev) +{ + printf(PCI_PRI_FMT, dev->addr.domain, dev->addr.bus, + dev->addr.devid, dev->addr.function); + printf(" - vendor:%x device:%x\n", dev->id.vendor_id, + dev->id.device_id); + printf(" %16.16"PRIx64" %16.16"PRIx64"\n", + dev->mem_resource.phys_addr, dev->mem_resource.len); + return 0; +} + +/* dump devices on the bus */ +void +rte_eal_pci_dump(void) +{ + struct rte_pci_device *dev = NULL; + + TAILQ_FOREACH(dev, &device_list, next) { + pci_dump_one_device(dev); + } +} + +/* register a driver */ +void +rte_eal_pci_register(struct rte_pci_driver *driver) +{ + TAILQ_INSERT_TAIL(&driver_list, driver, next); +} + +void +rte_eal_pci_set_blacklist(struct rte_pci_addr *blacklist, unsigned size) +{ + dev_blacklist = blacklist; + dev_blacklist_size = size; +} diff --git a/lib/librte_eal/common/eal_common_tailqs.c b/lib/librte_eal/common/eal_common_tailqs.c new file mode 100644 index 0000000000..7702b1f9ad --- /dev/null +++ b/lib/librte_eal/common/eal_common_tailqs.c @@ -0,0 +1,113 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
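For context, a hypothetical poll-mode driver would hook into the probe loop above roughly as follows. The exact layout of struct rte_pci_driver (name, id_table, devinit, ...) is defined in rte_pci.h, outside this file, so the field names shown here are assumptions.

    #include <rte_pci.h>

    /* called by rte_eal_pci_probe() for each matching, non-blacklisted device;
     * returning 0 tells pci_probe_all_drivers() the device has been claimed */
    static int
    my_dev_init(struct rte_pci_driver *dr, struct rte_pci_device *dev)
    {
            (void)dr;
            (void)dev;
            /* map BARs, allocate adapter state, ... */
            return 0;
    }

    static struct rte_pci_driver my_driver = {
            .name = "my_pmd",            /* assumed field names, see rte_pci.h */
            .devinit = my_dev_init,
            /* .id_table = my_pci_id_table, */
    };

    static void
    register_my_driver(void)
    {
            rte_eal_pci_register(&my_driver);   /* appended to driver_list */
    }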
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "eal_private.h" + +static unsigned tailq_idx = 0; + +struct rte_tailq_head * +rte_eal_tailq_lookup(const char *name) +{ + unsigned i; + struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; + + /* + * the algorithm is not optimal (linear), but there are few + * tailq's and this function should be called at init only + */ + for (i = 0; i < RTE_MAX_TAILQ; i++) { + if (!strncmp(name, mcfg->tailq_head[i].qname, RTE_TAILQ_NAMESIZE-1)) + return &mcfg->tailq_head[i]; + } + return NULL; +} + +struct rte_tailq_head * +rte_eal_tailq_reserve(const char *name) +{ + struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return rte_eal_tailq_lookup(name); + + if (tailq_idx == RTE_MAX_TAILQ){ + RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__); + return NULL; + } + + /* zone already exist */ + if (rte_eal_tailq_lookup(name) != NULL) { + RTE_LOG(DEBUG, EAL, "%s(): tailq <%s> already exists\n", + __func__, name); + return NULL; + } + + rte_snprintf(mcfg->tailq_head[tailq_idx].qname, RTE_TAILQ_NAMESIZE, + "%.*s", (int)(RTE_TAILQ_NAMESIZE - 1), name); + + return &mcfg->tailq_head[tailq_idx++]; +} + +int +rte_eal_tailqs_init(void) +{ + unsigned i; + struct rte_config *cfg = rte_eal_get_configuration(); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + for (i = 0; i < RTE_MAX_TAILQ; i++) + TAILQ_INIT(&cfg->mem_config->tailq_head[i].tailq_head); + + return 0; +} diff --git a/lib/librte_eal/common/include/eal_private.h b/lib/librte_eal/common/include/eal_private.h new file mode 100644 index 0000000000..023e4183cf --- /dev/null +++ b/lib/librte_eal/common/include/eal_private.h @@ -0,0 +1,176 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
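A brief, hypothetical sketch of how a library uses the tailq registry above: the primary process reserves a named slot in the shared configuration, and secondary processes resolve the same name to the same head (the declarations are assumed to live in rte_tailq.h).

    #include <rte_tailq.h>

    static struct rte_tailq_head *my_obj_list;

    static int
    my_lib_init(void)
    {
            /* reserve in the primary process, lookup in a secondary one */
            my_obj_list = rte_eal_tailq_reserve("MY_OBJ_LIST");
            return (my_obj_list == NULL) ? -1 : 0;
    }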
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _EAL_PRIVATE_H_ +#define _EAL_PRIVATE_H_ + +/** + * Initialize the memzone subsystem (private to eal). + * + * @return + * - 0 on success + * - Negative on error + */ +int rte_eal_memzone_init(void); + +/** + * Common log initialization function (private to eal). + * + * Called by environment-specific log initialization function to initialize + * log history. + * + * @param default_log + * The default log stream to be used. + * @return + * - 0 on success + * - Negative on error + */ +int rte_eal_common_log_init(FILE *default_log); + +/** + * Fill configuration with number of physical and logical processors + * + * This function is private to EAL. + * + * Parse /proc/cpuinfo to get the number of physical and logical + * processors on the machine. + * + * @return + * 0 on success, negative on error + */ +int rte_eal_cpu_init(void); + +/** + * Map memory + * + * This function is private to EAL. + * + * Fill configuration structure with these infos, and return 0 on success. + * + * @return + * 0 on success, negative on error + */ +int rte_eal_memory_init(void); + +/** + * Configure HPET + * + * This function is private to EAL. + * + * Mmap memory areas used by HPET (high precision event timer) that will + * provide our time reference. + * + * @return + * 0 on success, negative on error + */ +int rte_eal_hpet_init(void); + +/** + * Init early logs + * + * This function is private to EAL. + * + * @return + * 0 on success, negative on error + */ +int rte_eal_log_early_init(void); + +/** + * Init the default log stream + * + * This function is private to EAL. + * + * @return + * 0 on success, negative on error + */ +int rte_eal_log_init(void); + +/** + * Init the default log stream + * + * This function is private to EAL. + * + * @return + * 0 on success, negative on error + */ +int rte_eal_pci_init(void); + +struct rte_pci_driver; +struct rte_pci_device; + +/** + * Mmap memory for single PCI device + * + * This function is private to EAL. + * + * @return + * 0 on success, negative on error + */ +int rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, + struct rte_pci_device *dev); + +/** + * Init tail queues for non-EAL library structures. This is to allow + * the rings, mempools, etc. lists to be shared among multiple processes + * + * This function is private to EAL + * + * @return + * 0 on success, negative on error + */ +int rte_eal_tailqs_init(void); + +/** + * Init interrupt handling. + * + * This function is private to EAL. 
+ * + * @return + * 0 on success, negative on error + */ +int rte_eal_intr_init(void); + +/** + * Init alarm mechanism. This is to allow a callback be called after + * specific time. + * + * This function is private to EAL. + * + * @return + * 0 on success, negative on error + */ +int rte_eal_alarm_init(void); + +#endif /* _EAL_PRIVATE_H_ */ diff --git a/lib/librte_eal/common/include/i686/arch/rte_atomic.h b/lib/librte_eal/common/include/i686/arch/rte_atomic.h new file mode 100644 index 0000000000..c834290e26 --- /dev/null +++ b/lib/librte_eal/common/include/i686/arch/rte_atomic.h @@ -0,0 +1,959 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Inspired from FreeBSD src/sys/i386/include/atomic.h + * Copyright (c) 1998 Doug Rabson + * All rights reserved. + */ + +#ifndef _RTE_ATOMIC_H_ +#error "don't include this file directly, please include generic " +#endif + +#ifndef _RTE_I686_ATOMIC_H_ +#define _RTE_I686_ATOMIC_H_ + +/** + * @file + * Atomic Operations on i686 + */ + +#if RTE_MAX_LCORE == 1 +#define MPLOCKED /**< No need to insert MP lock prefix. */ +#else +#define MPLOCKED "lock ; " /**< Insert MP lock prefix. */ +#endif + +/** + * General memory barrier. + * + * Guarantees that the LOAD and STORE operations generated before the + * barrier occur before the LOAD and STORE operations generated after. + */ +#define rte_mb() asm volatile(MPLOCKED "addl $0,(%%esp)" : : : "memory") + +/** + * Write memory barrier. + * + * Guarantees that the STORE operations generated before the barrier + * occur before the STORE operations generated after. + */ +#define rte_wmb() asm volatile(MPLOCKED "addl $0,(%%esp)" : : : "memory") + +/** + * Read memory barrier. + * + * Guarantees that the LOAD operations generated before the barrier + * occur before the LOAD operations generated after. 
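The three barrier macros are easiest to read with the classic producer/consumer flag pattern; a hypothetical sketch:

    #include <rte_atomic.h>

    static int data;
    static volatile int ready;

    static void
    producer(void)
    {
            data = 42;
            rte_wmb();      /* 'data' must be visible before 'ready' is set */
            ready = 1;
    }

    static int
    consumer(void)
    {
            while (ready == 0)
                    ;       /* spin until the producer publishes */
            rte_rmb();      /* load of 'ready' completes before 'data' is read */
            return data;
    }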
+ */ +#define rte_rmb() asm volatile(MPLOCKED "addl $0,(%%esp)" : : : "memory") + +/*------------------------- 16 bit atomic operations -------------------------*/ + +/** + * Atomic compare and set. + * + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 16-bit words) + * + * @param dst + * The destination location into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src) +{ + uint8_t res; + + asm volatile( + MPLOCKED + "cmpxchgw %[src], %[dst];" + "sete %[res];" + : [res] "=a" (res), /* output */ + [dst] "=m" (*dst) + : [src] "r" (src), /* input */ + "a" (exp), + "m" (*dst) + : "memory"); /* no-clobber list */ + return res; +} + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int16_t cnt; /**< An internal counter value. */ +} rte_atomic16_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC16_INIT(val) { (val) } + +/** + * Initialize an atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_init(rte_atomic16_t *v) +{ + v->cnt = 0; +} + +/** + * Atomically read a 16-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int16_t +rte_atomic16_read(const rte_atomic16_t *v) +{ + return v->cnt; +} + +/** + * Atomically set a counter to a 16-bit value. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value for the counter. + */ +static inline void +rte_atomic16_set(rte_atomic16_t *v, int16_t new_value) +{ + v->cnt = new_value; +} + +/** + * Atomically add a 16-bit value to an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic16_add(rte_atomic16_t *v, int16_t inc) +{ + asm volatile( + MPLOCKED + "addw %[inc], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [inc] "ir" (inc), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically subtract a 16-bit value from an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic16_sub(rte_atomic16_t *v, int16_t dec) +{ + asm volatile( + MPLOCKED + "subw %[dec], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [dec] "ir" (dec), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically increment a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_inc(rte_atomic16_t *v) +{ + asm volatile( + MPLOCKED + "incw %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Atomically decrement a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_dec(rte_atomic16_t *v) +{ + asm volatile( + MPLOCKED + "decw %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Atomically add a 16-bit value to a counter and return the result. + * + * Atomically adds the 16-bits value (inc) to the atomic counter (v) and + * returns the value of v after addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. 
+ */ +static inline int16_t +rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc) +{ + int16_t prev = inc; + + asm volatile( + MPLOCKED + "xaddw %[prev], %[cnt]" + : [prev] "+r" (prev), /* output */ + [cnt] "=m" (v->cnt) + : "m" (v->cnt) /* input */ + ); + return (int16_t)(prev + inc); +} + +/** + * Atomically subtract a 16-bit value from a counter and return + * the result. + * + * Atomically subtracts the 16-bit value (inc) from the atomic counter + * (v) and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int16_t +rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec) +{ + return rte_atomic16_add_return(v, (int16_t)-dec); +} + +/** + * Atomically increment a 16-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the increment operation is 0; false otherwise. + */ +static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v) +{ + uint8_t ret; + + asm volatile( + MPLOCKED + "incw %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + return (ret != 0); +} + +/** + * Atomically decrement a 16-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the decrement operation is 0; false otherwise. + */ +static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v) +{ + uint8_t ret; + + asm volatile(MPLOCKED + "decw %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + return (ret != 0); +} + +/** + * Atomically test and set a 16-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int rte_atomic16_test_and_set(rte_atomic16_t *v) +{ + return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1); +} + +/** + * Atomically set a 16-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void rte_atomic16_clear(rte_atomic16_t *v) +{ + v->cnt = 0; +} + +/*------------------------- 32 bit atomic operations -------------------------*/ + +/** + * Atomic compare and set. + * + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 32-bit words) + * + * @param dst + * The destination location into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src) +{ + uint8_t res; + + asm volatile( + MPLOCKED + "cmpxchgl %[src], %[dst];" + "sete %[res];" + : [res] "=a" (res), /* output */ + [dst] "=m" (*dst) + : [src] "r" (src), /* input */ + "a" (exp), + "m" (*dst) + : "memory"); /* no-clobber list */ + return res; +} + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int32_t cnt; /**< An internal counter value. 
*/ +} rte_atomic32_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC32_INIT(val) { (val) } + +/** + * Initialize an atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_init(rte_atomic32_t *v) +{ + v->cnt = 0; +} + +/** + * Atomically read a 32-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int32_t +rte_atomic32_read(const rte_atomic32_t *v) +{ + return v->cnt; +} + +/** + * Atomically set a counter to a 32-bit value. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value for the counter. + */ +static inline void +rte_atomic32_set(rte_atomic32_t *v, int32_t new_value) +{ + v->cnt = new_value; +} + +/** + * Atomically add a 32-bit value to an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic32_add(rte_atomic32_t *v, int32_t inc) +{ + asm volatile( + MPLOCKED + "addl %[inc], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [inc] "ir" (inc), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically subtract a 32-bit value from an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic32_sub(rte_atomic32_t *v, int32_t dec) +{ + asm volatile( + MPLOCKED + "subl %[dec], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [dec] "ir" (dec), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically increment a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_inc(rte_atomic32_t *v) +{ + asm volatile( + MPLOCKED + "incl %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Atomically decrement a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_dec(rte_atomic32_t *v) +{ + asm volatile( + MPLOCKED + "decl %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Atomically add a 32-bit value to a counter and return the result. + * + * Atomically adds the 32-bits value (inc) to the atomic counter (v) and + * returns the value of v after addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. + */ +static inline int32_t +rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc) +{ + int32_t prev = inc; + + asm volatile( + MPLOCKED + "xaddl %[prev], %[cnt]" + : [prev] "+r" (prev), /* output */ + [cnt] "=m" (v->cnt) + : "m" (v->cnt) /* input */ + ); + return (int32_t)(prev + inc); +} + +/** + * Atomically subtract a 32-bit value from a counter and return + * the result. + * + * Atomically subtracts the 32-bit value (inc) from the atomic counter + * (v) and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int32_t +rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec) +{ + return rte_atomic32_add_return(v, -dec); +} + +/** + * Atomically increment a 32-bit counter by one and test. 
+ * + * Atomically increments the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the increment operation is 0; false otherwise. + */ +static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v) +{ + uint8_t ret; + + asm volatile( + MPLOCKED + "incl %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + return (ret != 0); +} + +/** + * Atomically decrement a 32-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the decrement operation is 0; false otherwise. + */ +static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v) +{ + uint8_t ret; + + asm volatile(MPLOCKED + "decl %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + return (ret != 0); +} + +/** + * Atomically test and set a 32-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int rte_atomic32_test_and_set(rte_atomic32_t *v) +{ + return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1); +} + +/** + * Atomically set a 32-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void rte_atomic32_clear(rte_atomic32_t *v) +{ + v->cnt = 0; +} + +/*------------------------- 64 bit atomic operations -------------------------*/ + +/** + * An atomic compare and set function used by the mutex functions. + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 64-bit words) + * + * @param dst + * The destination into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src) +{ + uint8_t res; + union { + struct { + uint32_t l32; + uint32_t h32; + }; + uint64_t u64; + } _exp, _src; + + _exp.u64 = exp; + _src.u64 = src; + + asm volatile ( + MPLOCKED + "cmpxchg8b (%[dst]);" + "setz %[res];" + : [res] "=a" (res) /* result in eax */ + : [dst] "S" (dst), /* esi */ + "b" (_src.l32), /* ebx */ + "c" (_src.h32), /* ecx */ + "a" (_exp.l32), /* eax */ + "d" (_exp.h32) /* edx */ + : "memory" ); /* no-clobber list */ + + return res; +} + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int64_t cnt; /**< Internal counter value. */ +} rte_atomic64_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC64_INIT(val) { (val) } + +/** + * Initialize the atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_init(rte_atomic64_t *v) +{ + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, 0); + } +} + +/** + * Atomically read a 64-bit counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. 
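A typical use of the 32-bit counter operations above is reference counting; a hypothetical sketch (the creator of the object is assumed to start the counter at 1 with rte_atomic32_set()):

    #include <stdlib.h>
    #include <rte_atomic.h>

    struct my_obj {
            rte_atomic32_t refcnt;
            /* ... payload ... */
    };

    static void
    my_obj_get(struct my_obj *o)
    {
            rte_atomic32_inc(&o->refcnt);
    }

    static void
    my_obj_put(struct my_obj *o)
    {
            /* dec_and_test() is true only for the caller that drops the last
             * reference, so exactly one thread frees the object */
            if (rte_atomic32_dec_and_test(&o->refcnt))
                    free(o);
    }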
+ */ +static inline int64_t +rte_atomic64_read(rte_atomic64_t *v) +{ + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + /* replace the value by itself */ + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, tmp); + } + return tmp; +} + +/** + * Atomically set a 64-bit counter. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value of the counter. + */ +static inline void +rte_atomic64_set(rte_atomic64_t *v, int64_t new_value) +{ + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, new_value); + } +} + +/** + * Atomically add a 64-bit value to a counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic64_add(rte_atomic64_t *v, int64_t inc) +{ + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, tmp + inc); + } +} + +/** + * Atomically subtract a 64-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be substracted from the counter. + */ +static inline void +rte_atomic64_sub(rte_atomic64_t *v, int64_t dec) +{ + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, tmp - dec); + } +} + +/** + * Atomically increment a 64-bit counter by one and test. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_inc(rte_atomic64_t *v) +{ + rte_atomic64_add(v, 1); +} + +/** + * Atomically decrement a 64-bit counter by one and test. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_dec(rte_atomic64_t *v) +{ + rte_atomic64_sub(v, 1); +} + +/** + * Add a 64-bit value to an atomic counter and return the result. + * + * Atomically adds the 64-bit value (inc) to the atomic counter (v) and + * returns the value of v after the addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. + */ +static inline int64_t +rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc) +{ + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, tmp + inc); + } + + return tmp + inc; +} + +/** + * Subtract a 64-bit value from an atomic counter and return the result. + * + * Atomically subtracts the 64-bit value (dec) from the atomic counter (v) + * and returns the value of v after the substraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be substracted from the counter. + * @return + * The value of v after the substraction. + */ +static inline int64_t +rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec) +{ + int success = 0; + uint64_t tmp; + + while (success == 0) { + tmp = v->cnt; + success = rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, + tmp, tmp - dec); + } + + return tmp - dec; +} + +/** + * Atomically increment a 64-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns + * true if the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. 
+ * @return + * True if the result after the addition is 0; false otherwise. + */ +static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v) +{ + return rte_atomic64_add_return(v, 1) == 0; +} + +/** + * Atomically decrement a 64-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after substraction is 0; false otherwise. + */ +static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v) +{ + return rte_atomic64_sub_return(v, 1) == 0; +} + +/** + * Atomically test and set a 64-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int rte_atomic64_test_and_set(rte_atomic64_t *v) +{ + return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1); +} + +/** + * Atomically set a 64-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void rte_atomic64_clear(rte_atomic64_t *v) +{ + rte_atomic64_set(v, 0); +} + +#endif /* _RTE_I686_ATOMIC_H_ */ diff --git a/lib/librte_eal/common/include/rte_alarm.h b/lib/librte_eal/common/include/rte_alarm.h new file mode 100644 index 0000000000..2ed2a11b1f --- /dev/null +++ b/lib/librte_eal/common/include/rte_alarm.h @@ -0,0 +1,100 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_ALARM_H_ +#define _RTE_ALARM_H_ + +/** + * @file + * + * Alarm functions + * + * Simple alarm-clock functionality supplied by eal. + * Does not require hpet support. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * Signature of callback back function called when an alarm goes off. 
+ */ +typedef void (*rte_eal_alarm_callback)(void *arg); + +/** + * Function to set a callback to be triggered when us microseconds + * have expired. Accuracy of timing to the microsecond is not guaranteed. The + * alarm function will not be called *before* the requested time, but may + * be called a short period of time afterwards. + * The alarm handler will be called only once. There is no need to call + * "rte_eal_alarm_cancel" from within the callback function. + * + * @param us + * The time in microseconds before the callback is called + * @param cb + * The function to be called when the alarm expires + * @param cb_arg + * Pointer parameter to be passed to the callback function + * + * @return + * On success, zero. + * On failure, a negative error number + */ +int rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb, void *cb_arg); + +/** + * Function to cancel an alarm callback which has been registered before. + * + * @param cb_fn + * alarm callback + * @param cb_arg + * Pointer parameter to be passed to the callback function. To remove all + * copies of a given callback function, irrespective of parameter, (void *)-1 + * can be used here. + * + * @return + * - The number of callbacks removed + */ +int rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg); + +#ifdef __cplusplus +} +#endif + + +#endif /* _RTE_ALARM_H_ */ diff --git a/lib/librte_eal/common/include/rte_atomic.h b/lib/librte_eal/common/include/rte_atomic.h new file mode 100644 index 0000000000..dd413970b4 --- /dev/null +++ b/lib/librte_eal/common/include/rte_atomic.h @@ -0,0 +1,657 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_ATOMIC_H_ +#define _RTE_ATOMIC_H_ + +/** + * @file + * Atomic Operations + * + * This file defines a generic API for atomic + * operations. The implementation is architecture-specific. 
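Since rte_eal_alarm_set() fires its callback exactly once, a periodic job simply re-arms itself from inside the callback; a hypothetical sketch:

    #include <rte_alarm.h>

    #define STATS_INTERVAL_US (1000 * 1000)     /* one second, illustrative */

    static void
    dump_stats_cb(void *arg)
    {
            /* ... print or reset application statistics ... */

            /* schedule the next run; no need to cancel the current alarm */
            rte_eal_alarm_set(STATS_INTERVAL_US, dump_stats_cb, arg);
    }

    static void
    start_stats_timer(void *app_ctx)
    {
            rte_eal_alarm_set(STATS_INTERVAL_US, dump_stats_cb, app_ctx);
    }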
+ * + * See lib/librte_eal/common/include/i686/arch/rte_atomic.h + * See lib/librte_eal/common/include/x86_64/arch/rte_atomic.h + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#include "arch/rte_atomic.h" + + +#ifdef __DOXYGEN__ + +/** + * General memory barrier. + * + * Guarantees that the LOAD and STORE operations generated before the + * barrier occur before the LOAD and STORE operations generated after. + */ +#define rte_mb() asm volatile("mfence;" : : : "memory") + +/** + * Write memory barrier. + * + * Guarantees that the STORE operations generated before the barrier + * occur before the STORE operations generated after. + */ +#define rte_wmb() asm volatile("sfence;" : : : "memory") + +/** + * Read memory barrier. + * + * Guarantees that the LOAD operations generated before the barrier + * occur before the LOAD operations generated after. + */ +#define rte_rmb() asm volatile("lfence;" : : : "memory") + +/*------------------------- 16 bit atomic operations -------------------------*/ + +/** + * Atomic compare and set. + * + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 16-bit words) + * + * @param dst + * The destination location into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src); + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int16_t cnt; /**< An internal counter value. */ +} rte_atomic16_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC16_INIT(val) { (val) } + +/** + * Initialize an atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_init(rte_atomic16_t *v) +{ + v->cnt = 0; +} + +/** + * Atomically read a 16-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int16_t +rte_atomic16_read(const rte_atomic16_t *v); + +/** + * Atomically set a counter to a 16-bit value. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value for the counter. + */ +static inline void +rte_atomic16_set(rte_atomic16_t *v, int16_t new_value); + +/** + * Atomically add a 16-bit value to an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic16_add(rte_atomic16_t *v, int16_t inc); + +/** + * Atomically subtract a 16-bit value from an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic16_sub(rte_atomic16_t *v, int16_t dec); + +/** + * Atomically increment a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_inc(rte_atomic16_t *v); + +/** + * Atomically decrement a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_dec(rte_atomic16_t *v); + +/** + * Atomically add a 16-bit value to a counter and return the result. + * + * Atomically adds the 16-bits value (inc) to the atomic counter (v) and + * returns the value of v after addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. 
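/*
 * Editorial aside (not part of the original patch): a small sketch of the
 * compare-and-set primitive documented above, used to build a saturating
 * 16-bit increment. The function and variable names are illustrative only.
 */
#include <stdint.h>
#include <rte_atomic.h>

/* Increment *counter atomically, but never beyond the given limit. */
static int
saturating_inc16(volatile uint16_t *counter, uint16_t limit)
{
	uint16_t old;

	do {
		old = *counter;
		if (old >= limit)
			return 0;        /* already saturated, nothing done */
	} while (rte_atomic16_cmpset(counter, old, (uint16_t)(old + 1)) == 0);

	return 1;                        /* increment succeeded */
}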
+ */ +static inline int16_t +rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc); + +/** + * Atomically subtract a 16-bit value from a counter and return + * the result. + * + * Atomically subtracts the 16-bit value (inc) from the atomic counter + * (v) and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int16_t +rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec); + +/** + * Atomically increment a 16-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the increment operation is 0; false otherwise. + */ +static inline int +rte_atomic16_inc_and_test(rte_atomic16_t *v); + +/** + * Atomically decrement a 16-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the decrement operation is 0; false otherwise. + */ +static inline int +rte_atomic16_dec_and_test(rte_atomic16_t *v); + +/** + * Atomically test and set a 16-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int +rte_atomic16_test_and_set(rte_atomic16_t *v); + +/** + * Atomically set a 16-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_clear(rte_atomic16_t *v); + +/*------------------------- 32 bit atomic operations -------------------------*/ + +/** + * Atomic compare and set. + * + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 32-bit words) + * + * @param dst + * The destination location into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src); + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int32_t cnt; /**< An internal counter value. */ +} rte_atomic32_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC32_INIT(val) { (val) } + +/** + * Initialize an atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_init(rte_atomic32_t *v); + +/** + * Atomically read a 32-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int32_t +rte_atomic32_read(const rte_atomic32_t *v); + +/** + * Atomically set a counter to a 32-bit value. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value for the counter. + */ +static inline void +rte_atomic32_set(rte_atomic32_t *v, int32_t new_value); + +/** + * Atomically add a 32-bit value to an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. 
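/*
 * Editorial aside (not part of the original patch): a sketch showing the
 * inc-and-test / dec-and-test / test-and-set helpers described above used as
 * a tiny reference count plus a run-once flag. All names are illustrative.
 */
#include <rte_atomic.h>

static rte_atomic16_t refcnt = RTE_ATOMIC16_INIT(1);
static rte_atomic16_t once = RTE_ATOMIC16_INIT(0);

static void
obj_get(void)
{
	rte_atomic16_inc(&refcnt);
}

static void
obj_put(void)
{
	/* dec_and_test() returns true only for the caller that drops the
	 * last reference, so exactly one thread performs the cleanup. */
	if (rte_atomic16_dec_and_test(&refcnt))
		; /* free/cleanup would go here */
}

static void
init_once(void)
{
	/* test_and_set() succeeds for exactly one caller. */
	if (rte_atomic16_test_and_set(&once))
		; /* one-time initialization would go here */
}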
+ */ +static inline void +rte_atomic32_add(rte_atomic32_t *v, int32_t inc); + +/** + * Atomically subtract a 32-bit value from an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic32_sub(rte_atomic32_t *v, int32_t dec); + +/** + * Atomically increment a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_inc(rte_atomic32_t *v); + +/** + * Atomically decrement a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_dec(rte_atomic32_t *v); + +/** + * Atomically add a 32-bit value to a counter and return the result. + * + * Atomically adds the 32-bits value (inc) to the atomic counter (v) and + * returns the value of v after addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. + */ +static inline int32_t +rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc); + +/** + * Atomically subtract a 32-bit value from a counter and return + * the result. + * + * Atomically subtracts the 32-bit value (inc) from the atomic counter + * (v) and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int32_t +rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec); + +/** + * Atomically increment a 32-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the increment operation is 0; false otherwise. + */ +static inline int +rte_atomic32_inc_and_test(rte_atomic32_t *v); + +/** + * Atomically decrement a 32-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the decrement operation is 0; false otherwise. + */ +static inline int +rte_atomic32_dec_and_test(rte_atomic32_t *v); + +/** + * Atomically test and set a 32-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int +rte_atomic32_test_and_set(rte_atomic32_t *v); + +/** + * Atomically set a 32-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_clear(rte_atomic32_t *v); + +/*------------------------- 64 bit atomic operations -------------------------*/ + +/** + * An atomic compare and set function used by the mutex functions. + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 64-bit words) + * + * @param dst + * The destination into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src); + +/** + * The atomic counter structure. 
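/*
 * Editorial aside (not part of the original patch): a sketch of the 32-bit
 * counter operations listed above used for simple statistics. The stats
 * structure and function names are assumptions made for illustration.
 */
#include <stdint.h>
#include <rte_atomic.h>

struct port_stats {
	rte_atomic32_t rx_pkts;
	rte_atomic32_t rx_errors;
};

static void
stats_init(struct port_stats *s)
{
	rte_atomic32_init(&s->rx_pkts);
	rte_atomic32_init(&s->rx_errors);
}

static void
stats_account(struct port_stats *s, int32_t nb_rx, int ok)
{
	rte_atomic32_add(&s->rx_pkts, nb_rx);
	if (!ok)
		rte_atomic32_inc(&s->rx_errors);
}

static int32_t
stats_snapshot_and_reset(struct port_stats *s)
{
	/* Note: the read and the reset are individually atomic, but not
	 * atomic as a pair; good enough for periodic statistics. */
	int32_t pkts = rte_atomic32_read(&s->rx_pkts);

	rte_atomic32_set(&s->rx_pkts, 0);
	return pkts;
}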
+ */ +typedef struct { + volatile int64_t cnt; /**< Internal counter value. */ +} rte_atomic64_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC64_INIT(val) { (val) } + +/** + * Initialize the atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_init(rte_atomic64_t *v); + +/** + * Atomically read a 64-bit counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int64_t +rte_atomic64_read(rte_atomic64_t *v); + +/** + * Atomically set a 64-bit counter. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value of the counter. + */ +static inline void +rte_atomic64_set(rte_atomic64_t *v, int64_t new_value); + +/** + * Atomically add a 64-bit value to a counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic64_add(rte_atomic64_t *v, int64_t inc); + +/** + * Atomically subtract a 64-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic64_sub(rte_atomic64_t *v, int64_t dec); + +/** + * Atomically increment a 64-bit counter by one and test. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_inc(rte_atomic64_t *v); + +/** + * Atomically decrement a 64-bit counter by one and test. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_dec(rte_atomic64_t *v); + +/** + * Add a 64-bit value to an atomic counter and return the result. + * + * Atomically adds the 64-bit value (inc) to the atomic counter (v) and + * returns the value of v after the addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. + */ +static inline int64_t +rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc); + +/** + * Subtract a 64-bit value from an atomic counter and return the result. + * + * Atomically subtracts the 64-bit value (dec) from the atomic counter (v) + * and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int64_t +rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec); + +/** + * Atomically increment a 64-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns + * true if the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the addition is 0; false otherwise. + */ +static inline int +rte_atomic64_inc_and_test(rte_atomic64_t *v); + +/** + * Atomically decrement a 64-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after subtraction is 0; false otherwise. + */ +static inline int +rte_atomic64_dec_and_test(rte_atomic64_t *v); + +/** + * Atomically test and set a 64-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). 
Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int +rte_atomic64_test_and_set(rte_atomic64_t *v); + +/** + * Atomically set a 64-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_clear(rte_atomic64_t *v); + +#endif /* __DOXYGEN__ */ + + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_ATOMIC_H_ */ diff --git a/lib/librte_eal/common/include/rte_branch_prediction.h b/lib/librte_eal/common/include/rte_branch_prediction.h new file mode 100644 index 0000000000..a65a7221e8 --- /dev/null +++ b/lib/librte_eal/common/include/rte_branch_prediction.h @@ -0,0 +1,72 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/** + * @file + * Branch Prediction Helpers in RTE + */ + +#ifndef _RTE_BRANCH_PREDICTION_H_ +#define _RTE_BRANCH_PREDICTION_H_ + +/** + * Check if a branch is likely to be taken. + * + * This compiler builtin allows the developer to indicate if a branch is + * likely to be taken. Example: + * + * if (likely(x > 1)) + * do_stuff(); + * + */ +#ifndef likely +#define likely(x) __builtin_expect((x),1) +#endif /* likely */ + +/** + * Check if a branch is unlikely to be taken. + * + * This compiler builtin allows the developer to indicate if a branch is + * unlikely to be taken. Example: + * + * if (unlikely(x < 1)) + * do_stuff(); + * + */ +#ifndef unlikely +#define unlikely(x) __builtin_expect((x),0) +#endif /* unlikely */ + +#endif /* _RTE_BRANCH_PREDICTION_H_ */ diff --git a/lib/librte_eal/common/include/rte_byteorder.h b/lib/librte_eal/common/include/rte_byteorder.h new file mode 100644 index 0000000000..ccaa528bf3 --- /dev/null +++ b/lib/librte_eal/common/include/rte_byteorder.h @@ -0,0 +1,244 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. 
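/*
 * Editorial aside (not part of the original patch): a short sketch of the
 * likely()/unlikely() annotations defined in rte_branch_prediction.h above,
 * applied to a hot receive path. The surrounding function and the length
 * thresholds are illustrative assumptions.
 */
#include <stddef.h>
#include <rte_branch_prediction.h>

static int
process_packet(const void *pkt, size_t len)
{
	/* Errors are rare on the fast path, so tell the compiler to lay the
	 * error branch out of line. */
	if (unlikely(pkt == NULL || len == 0))
		return -1;

	if (likely(len >= 64)) {
		/* common case: full-sized frame */
		return 0;
	}

	/* uncommon case: runt frame needs extra handling */
	return 1;
}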
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_BYTEORDER_H_ +#define _RTE_BYTEORDER_H_ + +/** + * @file + * + * Byte Swap Operations + * + * This file defines a generic API for byte swap operations. Part of + * the implementation is architecture-specific. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* + * An internal function to swap bytes in a 16-bit value. + * + * It is used by rte_bswap16() when the value is constant. Do not use + * this function directly; rte_bswap16() is preferred. + */ +static inline uint16_t +rte_constant_bswap16(uint16_t x) +{ + return (uint16_t)(((x & 0x00ffU) << 8) | + ((x & 0xff00U) >> 8)); +} + +/* + * An internal function to swap bytes in a 32-bit value. + * + * It is used by rte_bswap32() when the value is constant. Do not use + * this function directly; rte_bswap32() is preferred. + */ +static inline uint32_t +rte_constant_bswap32(uint32_t x) +{ + return ((x & 0x000000ffUL) << 24) | + ((x & 0x0000ff00UL) << 8) | + ((x & 0x00ff0000UL) >> 8) | + ((x & 0xff000000UL) >> 24); +} + +/* + * An internal function to swap bytes of a 64-bit value. + * + * It is used by rte_bswap64() when the value is constant. Do not use + * this function directly; rte_bswap64() is preferred. + */ +static inline uint64_t +rte_constant_bswap64(uint64_t x) +{ + return ((x & 0x00000000000000ffULL) << 56) | + ((x & 0x000000000000ff00ULL) << 40) | + ((x & 0x0000000000ff0000ULL) << 24) | + ((x & 0x00000000ff000000ULL) << 8) | + ((x & 0x000000ff00000000ULL) >> 8) | + ((x & 0x0000ff0000000000ULL) >> 24) | + ((x & 0x00ff000000000000ULL) >> 40) | + ((x & 0xff00000000000000ULL) >> 56); +} + +/* + * An architecture-optimized byte swap for a 16-bit value. + * + * Do not use this function directly. The preferred function is rte_bswap16(). 
+ */ +static inline uint16_t rte_arch_bswap16(uint16_t _x) +{ + register uint16_t x = _x; + asm volatile ("xchgb %b[x1],%h[x2]" + : [x1] "=Q" (x) + : [x2] "0" (x) + ); + return x; +} + +/* + * An architecture-optimized byte swap for a 32-bit value. + * + * Do not use this function directly. The preferred function is rte_bswap32(). + */ +static inline uint32_t rte_arch_bswap32(uint32_t _x) +{ + register uint32_t x = _x; + asm volatile ("bswap %[x]" + : [x] "+r" (x) + ); + return x; +} + +/* + * An architecture-optimized byte swap for a 64-bit value. + * + * Do not use this function directly. The preferred function is rte_bswap64(). + */ +#ifdef RTE_ARCH_X86_64 +/* 64-bit mode */ +static inline uint64_t rte_arch_bswap64(uint64_t _x) +{ + register uint64_t x = _x; + asm volatile ("bswap %[x]" + : [x] "+r" (x) + ); + return x; +} +#else /* ! RTE_ARCH_X86_64 */ +/* Compat./Leg. mode */ +static inline uint64_t rte_arch_bswap64(uint64_t x) +{ + uint64_t ret = 0; + ret |= ((uint64_t)rte_arch_bswap32(x & 0xffffffffUL) << 32); + ret |= ((uint64_t)rte_arch_bswap32((x >> 32) & 0xffffffffUL)); + return ret; +} +#endif /* RTE_ARCH_X86_64 */ + +/** + * Swap bytes in a 16-bit value. + */ +#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ? \ + rte_constant_bswap16(x) : \ + rte_arch_bswap16(x))) \ + +/** + * Swap bytes in a 32-bit value. + */ +#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ? \ + rte_constant_bswap32(x) : \ + rte_arch_bswap32(x))) \ + +/** + * Swap bytes in a 64-bit value. + */ +#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ? \ + rte_constant_bswap64(x) : \ + rte_arch_bswap64(x))) \ + +/** + * Convert a 16-bit value from CPU order to little endian. + */ +#define rte_cpu_to_le_16(x) (x) + +/** + * Convert a 32-bit value from CPU order to little endian. + */ +#define rte_cpu_to_le_32(x) (x) + +/** + * Convert a 64-bit value from CPU order to little endian. + */ +#define rte_cpu_to_le_64(x) (x) + + +/** + * Convert a 16-bit value from CPU order to big endian. + */ +#define rte_cpu_to_be_16(x) rte_bswap16(x) + +/** + * Convert a 32-bit value from CPU order to big endian. + */ +#define rte_cpu_to_be_32(x) rte_bswap32(x) + +/** + * Convert a 64-bit value from CPU order to big endian. + */ +#define rte_cpu_to_be_64(x) rte_bswap64(x) + + +/** + * Convert a 16-bit value from little endian to CPU order. + */ +#define rte_le_to_cpu_16(x) (x) + +/** + * Convert a 32-bit value from little endian to CPU order. + */ +#define rte_le_to_cpu_32(x) (x) + +/** + * Convert a 64-bit value from little endian to CPU order. + */ +#define rte_le_to_cpu_64(x) (x) + + +/** + * Convert a 16-bit value from big endian to CPU order. + */ +#define rte_be_to_cpu_16(x) rte_bswap16(x) + +/** + * Convert a 32-bit value from big endian to CPU order. + */ +#define rte_be_to_cpu_32(x) rte_bswap32(x) + +/** + * Convert a 64-bit value from big endian to CPU order. + */ +#define rte_be_to_cpu_64(x) rte_bswap64(x) + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_BYTEORDER_H_ */ diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h new file mode 100644 index 0000000000..3c845697e7 --- /dev/null +++ b/lib/librte_eal/common/include/rte_common.h @@ -0,0 +1,310 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
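/*
 * Editorial aside (not part of the original patch): a sketch of the byte
 * order helpers above filling a network header, where multi-byte fields must
 * be big endian on the wire. The header layout is an illustrative assumption,
 * not a DPDK structure.
 */
#include <stdint.h>
#include <rte_byteorder.h>

struct udp_hdr_example {
	uint16_t src_port;    /* big endian on the wire */
	uint16_t dst_port;    /* big endian on the wire */
	uint16_t dgram_len;   /* big endian on the wire */
	uint16_t dgram_cksum;
};

static void
fill_udp_header(struct udp_hdr_example *h, uint16_t src, uint16_t dst,
		uint16_t payload_len)
{
	h->src_port = rte_cpu_to_be_16(src);
	h->dst_port = rte_cpu_to_be_16(dst);
	h->dgram_len = rte_cpu_to_be_16((uint16_t)(payload_len + 8));
	h->dgram_cksum = 0; /* checksum left to the caller */
}

static uint16_t
read_dst_port(const struct udp_hdr_example *h)
{
	return rte_be_to_cpu_16(h->dst_port);
}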
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_COMMON_H_ +#define _RTE_COMMON_H_ + +/** + * @file + * + * Generic, commonly-used macro and inline function definitions + * for Intel DPDK. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +/*********** Macros to eliminate unused variable warnings ********/ + +/** + * short definition to mark a function parameter unused + */ +#define __rte_unused __attribute__((__unused__)) + +/** + * definition to mark a variable or function parameter as used so + * as to avoid a compiler warning + */ +#define RTE_SET_USED(x) (void)(x) + +/*********** Macros for pointer arithmetic ********/ + +/** + * add a byte-value offset from a pointer + */ +#define RTE_PTR_ADD(ptr, x) ((typeof(ptr))((uintptr_t)ptr + (x))) + +/** + * subtract a byte-value offset from a pointer + */ +#define RTE_PTR_SUB(ptr, x) ((typeof(ptr))((uintptr_t)ptr - (x))) + +/** + * get the difference between two pointer values, i.e. how far apart + * in bytes are the locations they point two. It is assumed that + * ptr1 is greater than ptr2. + */ +#define RTE_PTR_DIFF(ptr1, ptr2) ((uintptr_t)(ptr1) - (uintptr_t)(ptr2)) + +/*********** Macros/static functions for doing alignment ********/ + +/** + * Function which rounds an unsigned int down to a given power-of-two value. + * Takes uintptr_t types as parameters, as this type of operation is most + * commonly done for pointer alignment. (See also RTE_ALIGN_FLOOR, + * RTE_ALIGN_CEIL, RTE_ALIGN, RTE_PTR_ALIGN_FLOOR, RTE_PTR_ALIGN_CEL, + * RTE_PTR_ALIGN macros) + * @param ptr + * The value to be rounded down + * @param align + * The power-of-two of which the result must be a multiple. + * @return + * Function returns a properly aligned value where align is a power-of-two. + * If align is not a power-of-two, result will be incorrect. + */ +static inline uintptr_t +rte_align_floor_int(uintptr_t ptr, uintptr_t align) +{ + return (ptr & ~(align - 1)); +} + +/** + * Macro to align a pointer to a given power-of-two. 
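/*
 * Editorial aside (not part of the original patch): a sketch of the pointer
 * arithmetic and alignment helpers above carving a payload out of a larger
 * buffer. The buffer layout and the page-size parameter are illustrative
 * assumptions.
 */
#include <stddef.h>
#include <stdint.h>
#include <rte_common.h>

static void *
reserve_after_header(void *base, size_t header_len, size_t total_len,
		size_t *remaining)
{
	/* Skip over the header... */
	void *payload = RTE_PTR_ADD(base, header_len);

	/* ...and report how many bytes are left after it. */
	*remaining = total_len - RTE_PTR_DIFF(payload, base);
	return payload;
}

static uintptr_t
page_base(uintptr_t addr, uintptr_t page_size)
{
	/* page_size must be a power of two for the rounding to be correct */
	return rte_align_floor_int(addr, page_size);
}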
The resultant + * pointer will be a pointer of the same type as the first parameter, and + * point to an address no higher than the first parameter. Second parameter + * must be a power-of-two value. + */ +#define RTE_ALIGN_FLOOR(ptr, align) \ + (typeof(ptr))rte_align_floor_int((uintptr_t)ptr, align) + +/** + * Macro to align a pointer to a given power-of-two. The resultant + * pointer will be a pointer of the same type as the first parameter, and + * point to an address no lower than the first parameter. Second parameter + * must be a power-of-two value. + */ +#define RTE_ALIGN_CEIL(ptr, align) \ + RTE_ALIGN_FLOOR(RTE_PTR_ADD(ptr, align - 1), align) + +/** + * Macro to align a pointer to a given power-of-two. The resultant + * pointer will be a pointer of the same type as the first parameter, and + * point to an address no lower than the first parameter. Second parameter + * must be a power-of-two value. + * This function is the same as RTE_ALIGN_CEIL + */ +#define RTE_ALIGN(ptr, align) RTE_ALIGN_CEIL(ptr, align) + +/** + * Checks if a pointer is aligned to a given power-of-two value + * + * @param ptr + * The pointer whose alignment is to be checked + * @param align + * The power-of-two value to which the ptr should be aligned + * + * @return + * True(1) where the pointer is correctly aligned, false(0) otherwise + */ +static inline int +rte_is_aligned(void *ptr, unsigned align) +{ + return RTE_ALIGN(ptr, align) == ptr; +} + +/*********** Macros for compile type checks ********/ + +/** + * Triggers an error at compilation time if the condition is true. + */ +#ifndef __OPTIMIZE__ +#define RTE_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else +extern int RTE_BUILD_BUG_ON_detected_error; +#define RTE_BUILD_BUG_ON(condition) do { \ + ((void)sizeof(char[1 - 2*!!(condition)])); \ + if (condition) \ + RTE_BUILD_BUG_ON_detected_error = 1; \ +} while(0) +#endif + +/*********** Macros to work with powers of 2 ********/ + +/** + * Returns true if n is a power of 2 + * @param n + * Number to check + * @return 1 if true, 0 otherwise + */ +static inline int +rte_is_power_of_2(uint32_t n) +{ + return ((n-1) & n) == 0; +} + +/** + * Aligns input parameter to the next power of 2 + * + * @param x + * The integer value to algin + * + * @return + * Input parameter aligned to the next power of 2 + */ +static inline uint32_t +rte_align32pow2(uint32_t x) +{ + x--; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + + return x + 1; +} + +/*********** Macros for calculating min and max **********/ + +/** + * Macro to return the minimum of two numbers + */ +#define RTE_MIN(a, b) ({ \ + typeof (a) _a = (a); \ + typeof (b) _b = (b); \ + _a < _b ? _a : _b; \ + }) + +/** + * Macro to return the maximum of two numbers + */ +#define RTE_MAX(a, b) ({ \ + typeof (a) _a = (a); \ + typeof (b) _b = (b); \ + _a > _b ? _a : _b; \ + }) + +/*********** Other general functions / macros ********/ + +/** + * PAUSE instruction for tight loops (avoid busy waiting) + */ +static inline void +rte_pause (void) +{ + asm volatile ("pause"); +} + +#ifndef offsetof +/** Return the offset of a field in a structure. */ +#define offsetof(TYPE, MEMBER) __builtin_offsetof (TYPE, MEMBER) +#endif + +#define _RTE_STR(x) #x +/** Take a macro value and get a string version of it */ +#define RTE_STR(x) _RTE_STR(x) + +/** + * Converts a numeric string to the equivalent uint64_t value. 
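/*
 * Editorial aside (not part of the original patch): a sketch exercising a few
 * of the helpers above: the compile-time check, power-of-two rounding and the
 * min/max macros. The ring-sizing policy shown is an illustrative assumption.
 */
#include <stdint.h>
#include <rte_common.h>

struct msg {
	uint32_t id;
	uint32_t len;
};

static uint32_t
pick_ring_size(uint32_t requested, uint32_t floor, uint32_t ceiling)
{
	uint32_t clamped;

	/* Fail the build if the message header ever changes size. */
	RTE_BUILD_BUG_ON(sizeof(struct msg) != 8);

	/* Clamp the request, then round up to the next power of two, as many
	 * ring-style structures require. */
	clamped = RTE_MAX(floor, RTE_MIN(requested, ceiling));
	return rte_align32pow2(clamped);
}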
+ * As well as straight number conversion, also recognises the suffixes + * k, m and g for kilobytes, megabytes and gigabytes respectively. + * + * If a negative number is passed in i.e. a string with the first non-black + * character being "-", zero is returned. Zero is also returned in the case of + * an error with the strtoull call in the function. + * + * @param str + * String containing number to convert. + * @return + * Number. + */ +static inline uint64_t +rte_str_to_size(const char *str) +{ + char *endptr; + unsigned long long size; + + while (isspace((int)*str)) + str++; + if (*str == '-') + return 0; + + errno = 0; + size = strtoull(str, &endptr, 0); + if (errno) + return 0; + + if (*endptr == ' ') + endptr++; /* allow 1 space gap */ + + switch (*endptr){ + case 'G': case 'g': size *= 1024; /* fall-through */ + case 'M': case 'm': size *= 1024; /* fall-through */ + case 'K': case 'k': size *= 1024; /* fall-through */ + default: + break; + } + return size; +} + +/** + * Function to terminate the application immediately, printing an error + * message and returning the exit_code back to the shell. + * + * This function never returns + * + * @param exit_code + * The exit code to be returned by the application + * @param format + * The format string to be used for printing the message. This can include + * printf format characters which will be expanded using any further parameters + * to the function. + */ +void +rte_exit(int exit_code, const char *format, ...) + __attribute__((noreturn)) + __attribute__((format(printf, 2, 3))); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/lib/librte_eal/common/include/rte_cpuflags.h b/lib/librte_eal/common/include/rte_cpuflags.h new file mode 100644 index 0000000000..72c3f2b63b --- /dev/null +++ b/lib/librte_eal/common/include/rte_cpuflags.h @@ -0,0 +1,174 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
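/*
 * Editorial aside (not part of the original patch): a sketch of
 * rte_str_to_size() and rte_exit() as documented above, parsing a memory-size
 * argument such as "512K" or "2M". The option name and error text are
 * illustrative assumptions.
 */
#include <stdint.h>
#include <rte_common.h>

static uint64_t
parse_pool_size(const char *arg)
{
	uint64_t size = rte_str_to_size(arg); /* e.g. "2M" -> 2 * 1024 * 1024 */

	if (size == 0)
		rte_exit(1, "invalid pool size '%s'\n", arg);
	return size;
}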
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_CPUFLAGS_H_ +#define _RTE_CPUFLAGS_H_ + +/** + * @file + * Simple API to determine available CPU features at runtime. + */ + +#ifdef __cplusplus +extern "C" { +#endif + + +/** + * Enumeration of all CPU features supported + */ +enum rte_cpu_flag_t { + /* (EAX 01h) ECX features*/ + RTE_CPUFLAG_SSE3 = 0, /**< SSE3 */ + RTE_CPUFLAG_PCLMULQDQ, /**< PCLMULQDQ */ + RTE_CPUFLAG_DTES64, /**< DTES64 */ + RTE_CPUFLAG_MONITOR, /**< MONITOR */ + RTE_CPUFLAG_DS_CPL, /**< DS_CPL */ + RTE_CPUFLAG_VMX, /**< VMX */ + RTE_CPUFLAG_SMX, /**< SMX */ + RTE_CPUFLAG_EIST, /**< EIST */ + RTE_CPUFLAG_TM2, /**< TM2 */ + RTE_CPUFLAG_SSSE3, /**< SSSE3 */ + RTE_CPUFLAG_CNXT_ID, /**< CNXT_ID */ + RTE_CPUFLAG_FMA, /**< FMA */ + RTE_CPUFLAG_CMPXCHG16B, /**< CMPXCHG16B */ + RTE_CPUFLAG_XTPR, /**< XTPR */ + RTE_CPUFLAG_PDCM, /**< PDCM */ + RTE_CPUFLAG_PCID, /**< PCID */ + RTE_CPUFLAG_DCA, /**< DCA */ + RTE_CPUFLAG_SSE4_1, /**< SSE4_1 */ + RTE_CPUFLAG_SSE4_2, /**< SSE4_2 */ + RTE_CPUFLAG_X2APIC, /**< X2APIC */ + RTE_CPUFLAG_MOVBE, /**< MOVBE */ + RTE_CPUFLAG_POPCNT, /**< POPCNT */ + RTE_CPUFLAG_TSC_DEADLINE, /**< TSC_DEADLINE */ + RTE_CPUFLAG_AES, /**< AES */ + RTE_CPUFLAG_XSAVE, /**< XSAVE */ + RTE_CPUFLAG_OSXSAVE, /**< OSXSAVE */ + RTE_CPUFLAG_AVX, /**< AVX */ + RTE_CPUFLAG_F16C, /**< F16C */ + RTE_CPUFLAG_RDRAND, /**< RDRAND */ + + /* (EAX 01h) EDX features */ + RTE_CPUFLAG_FPU, /**< FPU */ + RTE_CPUFLAG_VME, /**< VME */ + RTE_CPUFLAG_DE, /**< DE */ + RTE_CPUFLAG_PSE, /**< PSE */ + RTE_CPUFLAG_TSC, /**< TSC */ + RTE_CPUFLAG_MSR, /**< MSR */ + RTE_CPUFLAG_PAE, /**< PAE */ + RTE_CPUFLAG_MCE, /**< MCE */ + RTE_CPUFLAG_CX8, /**< CX8 */ + RTE_CPUFLAG_APIC, /**< APIC */ + RTE_CPUFLAG_SEP, /**< SEP */ + RTE_CPUFLAG_MTRR, /**< MTRR */ + RTE_CPUFLAG_PGE, /**< PGE */ + RTE_CPUFLAG_MCA, /**< MCA */ + RTE_CPUFLAG_CMOV, /**< CMOV */ + RTE_CPUFLAG_PAT, /**< PAT */ + RTE_CPUFLAG_PSE36, /**< PSE36 */ + RTE_CPUFLAG_PSN, /**< PSN */ + RTE_CPUFLAG_CLFSH, /**< CLFSH */ + RTE_CPUFLAG_DS, /**< DS */ + RTE_CPUFLAG_ACPI, /**< ACPI */ + RTE_CPUFLAG_MMX, /**< MMX */ + RTE_CPUFLAG_FXSR, /**< FXSR */ + RTE_CPUFLAG_SSE, /**< SSE */ + RTE_CPUFLAG_SSE2, /**< SSE2 */ + RTE_CPUFLAG_SS, /**< SS */ + RTE_CPUFLAG_HTT, /**< HTT */ + RTE_CPUFLAG_TM, /**< TM */ + RTE_CPUFLAG_PBE, /**< PBE */ + + /* (EAX 06h) EAX features */ + RTE_CPUFLAG_DIGTEMP, /**< DIGTEMP */ + RTE_CPUFLAG_TRBOBST, /**< TRBOBST */ + RTE_CPUFLAG_ARAT, /**< ARAT */ + RTE_CPUFLAG_PLN, /**< PLN */ + RTE_CPUFLAG_ECMD, /**< ECMD */ + RTE_CPUFLAG_PTM, /**< PTM */ + + /* (EAX 06h) ECX features */ + RTE_CPUFLAG_MPERF_APERF_MSR, /**< MPERF_APERF_MSR */ + RTE_CPUFLAG_ACNT2, /**< ACNT2 */ + RTE_CPUFLAG_ENERGY_EFF, /**< ENERGY_EFF */ + + /* (EAX 07h, ECX 0h) EBX features */ + RTE_CPUFLAG_FSGSBASE, /**< FSGSBASE */ + RTE_CPUFLAG_BMI1, /**< BMI1 */ + RTE_CPUFLAG_AVX2, /**< AVX2 */ + RTE_CPUFLAG_SMEP, /**< SMEP */ + RTE_CPUFLAG_BMI2, /**< BMI2 */ + RTE_CPUFLAG_ERMS, /**< ERMS */ + RTE_CPUFLAG_INVPCID, /**< INVPCID */ + + /* (EAX 80000001h) ECX features */ + RTE_CPUFLAG_LAHF_SAHF, /**< LAHF_SAHF */ + RTE_CPUFLAG_LZCNT, /**< LZCNT */ + + /* (EAX 80000001h) EDX features */ + RTE_CPUFLAG_SYSCALL, /**< SYSCALL */ + RTE_CPUFLAG_XD, /**< XD */ + RTE_CPUFLAG_1GB_PG, /**< 1GB_PG */ + RTE_CPUFLAG_RDTSCP, /**< RDTSCP */ + RTE_CPUFLAG_EM64T, /**< EM64T */ + + /* (EAX 80000007h) EDX features */ + RTE_CPUFLAG_INVTSC, /**< INVTSC */ + + /* The last item */ + RTE_CPUFLAG_NUMFLAGS, /**< This should always be the last! 
*/ +}; + + +/** + * Function for checking a CPU flag availability + * + * @param flag + * CPU flag to query CPU for + * @return + * 1 if flag is available + * 0 if flag is not available + * -ENOENT if flag is invalid + */ +int +rte_cpu_get_flag_enabled(enum rte_cpu_flag_t flag); + +#ifdef __cplusplus +} +#endif + + +#endif /* _RTE_CPUFLAGS_H_ */ diff --git a/lib/librte_eal/common/include/rte_cycles.h b/lib/librte_eal/common/include/rte_cycles.h new file mode 100644 index 0000000000..a1eca6c074 --- /dev/null +++ b/lib/librte_eal/common/include/rte_cycles.h @@ -0,0 +1,120 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_CYCLES_H_ +#define _RTE_CYCLES_H_ + +/** + * @file + * + * Simple Time Reference Functions (Cycles and HPET). + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * Read the TSC register. + * + * @return + * The TSC for this lcore. + */ +static inline uint64_t +rte_rdtsc(void) +{ + union { + uint64_t tsc_64; + struct { + uint32_t lo_32; + uint32_t hi_32; + }; + } tsc; + + asm volatile("rdtsc" : + "=a" (tsc.lo_32), + "=d" (tsc.hi_32)); + return tsc.tsc_64; +} + +/** + * Return the number of HPET cycles since boot + * + * This counter is global for all execution units. The number of + * cycles in one second can be retrived using rte_get_hpet_hz(). + * + * @return + * the number of cycles + */ +uint64_t +rte_get_hpet_cycles(void); + +/** + * Get the number of cycles in one second. + * + * @return + * The number of cycles in one second. + */ +uint64_t +rte_get_hpet_hz(void); + +/** + * Wait at least us microseconds. + * + * @param us + * The number of microseconds to wait. + */ +void +rte_delay_us(unsigned us); + +/** + * Wait at least ms milliseconds. + * + * @param ms + * The number of milliseconds to wait. 
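/*
 * Editorial aside (not part of the original patch): a sketch combining the
 * CPU-flag query and the TSC helpers declared above to time a short busy
 * wait. The flag chosen and the 10 us workload are illustrative assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <rte_cpuflags.h>
#include <rte_cycles.h>

static void
probe_and_time(void)
{
	uint64_t start, cycles;

	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2) == 1)
		printf("SSE4.2 available\n");

	start = rte_rdtsc();
	rte_delay_us(10);               /* busy-wait roughly 10 microseconds */
	cycles = rte_rdtsc() - start;

	printf("10 us of delay took about %" PRIu64 " TSC cycles\n", cycles);
}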
+ */ +static inline void +rte_delay_ms(unsigned ms) +{ + rte_delay_us(ms * 1000); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_CYCLES_H_ */ diff --git a/lib/librte_eal/common/include/rte_debug.h b/lib/librte_eal/common/include/rte_debug.h new file mode 100644 index 0000000000..451220e7a1 --- /dev/null +++ b/lib/librte_eal/common/include/rte_debug.h @@ -0,0 +1,96 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_DEBUG_H_ +#define _RTE_DEBUG_H_ + +/** + * @file + * + * Debug Functions in RTE + * + * This file defines a generic API for debug operations. Part of + * the implementation is architecture-specific. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Dump the stack of the calling core to the console. + */ +void rte_dump_stack(void); + +/** + * Dump the registers of the calling core to the console. + * + * Note: Not implemented in a userapp environment; use gdb instead. + */ +void rte_dump_registers(void); + +/** + * Provide notification of a critical non-recoverable error and terminate + * execution abnormally. + * + * Display the format string and its expanded arguments (printf-like). + * + * In a linuxapp environment, this function dumps the stack and calls + * abort() resulting in a core dump if enabled. + * + * The function never returns. + * + * @param format + * The format string + * @param args + * The variable list of arguments. + */ +#define rte_panic(format, args...) __rte_panic(__func__, format, ## args) + +/* + * Provide notification of a critical non-recoverable error and stop. + * + * This function should not be called directly. Refer to rte_panic() macro + * documentation. + */ +void __rte_panic(const char *funcname , const char *format, ...) 
+ __attribute__((noreturn)) + __attribute__((format(printf, 2, 3))); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_DEBUG_H_ */ diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h new file mode 100644 index 0000000000..58fa1cc9ad --- /dev/null +++ b/lib/librte_eal/common/include/rte_eal.h @@ -0,0 +1,174 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_EAL_H_ +#define _RTE_EAL_H_ + +/** + * @file + * + * EAL Configuration API + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTE_VERSION 1 /**< The version of the RTE configuration structure. */ +#define RTE_MAGIC 19820526 /**< Magic number written by the main partition when ready. */ + +/** + * The lcore role (used in RTE or not). + */ +enum rte_lcore_role_t { + ROLE_RTE, + ROLE_OFF, +}; + +/** + * The type of process in a linuxapp, multi-process setup + */ +enum rte_proc_type_t { + RTE_PROC_AUTO = -1, /* allow auto-detection of primary/secondary */ + RTE_PROC_PRIMARY = 0, /* set to zero, so primary is the default */ + RTE_PROC_SECONDARY, + + RTE_PROC_INVALID +}; + +/** + * the structure for the memory configuration for the RTE. + * Used by the rte_config structure. It is separated out, as for multi-process + * support, the memory details should be shared across instances + */ +struct rte_mem_config { + /* memory topology */ + uint32_t nchannel; /**< Number of channels (0 if unknown). */ + uint32_t nrank; /**< Number of ranks (0 if unknown). */ + + /* memory segments and zones */ + struct rte_memseg memseg[RTE_MAX_MEMSEG]; /**< Physmem descriptors. */ + struct rte_memzone memzone[RTE_MAX_MEMZONE]; /**< Memzone descriptors. */ + + struct rte_tailq_head tailq_head[RTE_MAX_TAILQ]; /**< Tailqs for objects */ +} __attribute__((__packed__)); + +/** + * The global RTE configuration structure. 
+ */ +struct rte_config { + uint32_t version; /**< Configuration [structure] version. */ + uint32_t magic; /**< Magic number - Sanity check. */ + + + uint32_t master_lcore; /**< Id of the master lcore */ + uint32_t lcore_count; /**< Number of available logical cores. */ + enum rte_lcore_role_t lcore_role[RTE_MAX_LCORE]; /**< State of cores. */ + + /** Primary or secondary configuration */ + enum rte_proc_type_t process_type; + + /** + * Pointer to memory configuration, which may be shared across multiple + * Intel DPDK instances + */ + struct rte_mem_config *mem_config; +} __attribute__((__packed__)); + +/** + * Get the global configuration structure. + * + * @return + * A pointer to the global configuration structure. + */ +struct rte_config *rte_eal_get_configuration(void); + +/** + * Get a lcore's role. + * + * @param lcore_id + * The identifier of the lcore. + * @return + * The role of the lcore. + */ +enum rte_lcore_role_t rte_eal_lcore_role(unsigned lcore_id); + + +/** + * Get the process type in a multi-process setup + * + * @return + * The process type + */ +enum rte_proc_type_t rte_eal_process_type(void); + +/** + * Initialize the Environment Abstraction Layer (EAL). + * + * This function is to be executed on the MASTER lcore only, as soon + * as possible in the application's main() function. + * + * The function finishes the initialization process that was started + * during boot (in case of baremetal) or before main() is called (in + * case of linuxapp). It puts the SLAVE lcores in the WAIT state. + * + * When the multi-partition feature is supported, depending on the + * configuration (if CONFIG_RTE_EAL_MAIN_PARTITION is disabled), this + * function waits to ensure that the magic number is set before + * returning. See also the rte_eal_get_configuration() function. Note: + * This behavior may change in the future. + * + * @param argc + * The argc argument that was given to the main() function. + * @param argv + * The argv argument that was given to the main() function. + * @return + * - On success, the number of parsed arguments, which is greater or + * equal to zero. After the call to rte_eal_init(), + * all arguments argv[x] with x < ret may be modified and should + * not be accessed by the application. + * - On failure, a negative error value. + */ +int rte_eal_init(int argc, char **argv); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_EAL_H_ */ diff --git a/lib/librte_eal/common/include/rte_errno.h b/lib/librte_eal/common/include/rte_errno.h new file mode 100644 index 0000000000..53f7b400f4 --- /dev/null +++ b/lib/librte_eal/common/include/rte_errno.h @@ -0,0 +1,98 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
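/*
 * Editorial aside (not part of the original patch): a minimal sketch of an
 * application entry point using rte_eal_init() and the process-type query
 * documented above. Error handling is reduced to the bare minimum and the
 * printed messages are illustrative.
 */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_debug.h>

int
main(int argc, char **argv)
{
	int ret = rte_eal_init(argc, argv);

	if (ret < 0)
		rte_panic("cannot init EAL\n");

	/* EAL consumed 'ret' arguments; the rest belong to the application. */
	argc -= ret;
	argv += ret;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		printf("running as a secondary process\n");

	return 0;
}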
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/** + * @file + * + * API for error cause tracking + */ + +#ifndef _RTE_ERRNO_H_ +#define _RTE_ERRNO_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +RTE_DECLARE_PER_LCORE(int, _rte_errno); /**< Per core error number. */ + +/** + * Error number value, stored per-thread, which can be queried after + * calls to certain functions to determine why those functions failed. + * + * Uses standard values from errno.h wherever possible, with a small number + * of additional possible values for RTE-specific conditions. + */ +#define rte_errno RTE_PER_LCORE(_rte_errno) + +/** + * Function which returns a printable string describing a particular + * error code. For non-RTE-specific error codes, this function returns + * the value from the libc strerror function. + * + * @param errnum + * The error number to be looked up - generally the value of rte_errno + * @return + * A pointer to a thread-local string containing the text describing + * the error. + */ +const char *rte_strerror(int errnum); + +#ifndef __ELASTERROR +/** + * Check if we have a defined value for the max system-defined errno values. + * if no max defined, start from 1000 to prevent overlap with standard values + */ +#define __ELASTERROR 1000 +#endif + +/** Error types */ +enum { + RTE_MIN_ERRNO = __ELASTERROR, /**< Start numbering above std errno vals */ + + E_RTE_SECONDARY, /**< Operation not allowed in secondary processes */ + E_RTE_NO_CONFIG, /**< Missing rte_config */ + E_RTE_NO_TAILQ, /**< Uninitialised TAILQ */ + + RTE_MAX_ERRNO /**< Max RTE error number */ +}; + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_ERRNO_H_ */ diff --git a/lib/librte_eal/common/include/rte_interrupts.h b/lib/librte_eal/common/include/rte_interrupts.h new file mode 100644 index 0000000000..151df98d63 --- /dev/null +++ b/lib/librte_eal/common/include/rte_interrupts.h @@ -0,0 +1,123 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
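/*
 * Editorial aside (not part of the original patch): a sketch of the rte_errno
 * / rte_strerror() facility described above. The failing call is hypothetical
 * and stands in for any RTE function that sets rte_errno on error.
 */
#include <stdio.h>
#include <rte_errno.h>

static void
report_failure(const char *what, int ret)
{
	if (ret < 0)
		printf("%s failed: %s (rte_errno=%d)\n",
			what, rte_strerror(rte_errno), rte_errno);
}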
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_INTERRUPTS_H_
+#define _RTE_INTERRUPTS_H_
+
+/**
+ * @file
+ *
+ * The RTE interrupt interface provides functions to register/unregister
+ * callbacks for a specific interrupt.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Interrupt handle */
+struct rte_intr_handle;
+
+/** Function to be registered for the specific interrupt */
+typedef void (*rte_intr_callback_fn)(struct rte_intr_handle *intr_handle,
+	void *cb_arg);
+
+#include
+
+/**
+ * Register a callback for a specific interrupt. Multiple
+ * callbacks can be registered at the same time.
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ * @param cb
+ * Callback address.
+ * @param cb_arg
+ * Address of the parameter for the callback.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_intr_callback_register(struct rte_intr_handle *intr_handle,
+	rte_intr_callback_fn cb, void *cb_arg);
+
+/**
+ * Unregister the callback according to the specified interrupt handle.
+ *
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ * @param cb
+ * Callback address.
+ * @param cb_arg
+ * Address of the parameter for the callback; (void *)-1 means remove all
+ * registered callbacks which have the same callback address.
+ *
+ * @return
+ * - On success, the number of callback entities removed.
+ * - On failure, a negative value.
+ */
+int rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
+	rte_intr_callback_fn cb, void *cb_arg);
+
+/**
+ * Enable the interrupt for the specified handle.
+ *
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_intr_enable(struct rte_intr_handle *intr_handle);
+
+/**
+ * Disable the interrupt for the specified handle.
+ *
+ * @param intr_handle
+ * Pointer to the interrupt handle.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int rte_intr_disable(struct rte_intr_handle *intr_handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/lib/librte_eal/common/include/rte_launch.h b/lib/librte_eal/common/include/rte_launch.h
new file mode 100644
index 0000000000..e8ad0a5e58
--- /dev/null
+++ b/lib/librte_eal/common/include/rte_launch.h
@@ -0,0 +1,179 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
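/*
 * Editorial aside (not part of the original patch): a sketch of registering
 * and enabling an interrupt callback with the rte_interrupts.h API above.
 * The handle is assumed to come from a driver; the callback body and the
 * port-id argument are illustrative assumptions.
 */
#include <stdio.h>
#include <rte_interrupts.h>

static void
link_change_cb(struct rte_intr_handle *intr_handle, void *cb_arg)
{
	(void)intr_handle;
	printf("interrupt on port %u\n", *(unsigned *)cb_arg);
}

static int
watch_interrupt(struct rte_intr_handle *handle, unsigned *port_id)
{
	int ret = rte_intr_callback_register(handle, link_change_cb, port_id);

	if (ret < 0)
		return ret;
	return rte_intr_enable(handle);
}

static void
unwatch_interrupt(struct rte_intr_handle *handle)
{
	rte_intr_disable(handle);
	/* (void *)-1 removes every registration of this callback. */
	rte_intr_callback_unregister(handle, link_change_cb, (void *)-1);
}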
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_LAUNCH_H_ +#define _RTE_LAUNCH_H_ + +/** + * @file + * + * Launch tasks on other lcores + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * State of an lcore. + */ +enum rte_lcore_state_t { + WAIT, /**< waiting a new command */ + RUNNING, /**< executing command */ + FINISHED, /**< command executed */ +}; + +/** + * Definition of a remote launch function. + */ +typedef int (lcore_function_t)(void *); + +/** + * Launch a function on another lcore. + * + * To be executed on the MASTER lcore only. + * + * Sends a message to a slave lcore (identified by the slave_id) that + * is in the WAIT state (this is true after the first call to + * rte_eal_init()). This can be checked by first calling + * rte_eal_wait_lcore(slave_id). + * + * When the remote lcore receives the message, it switches to + * the RUNNING state, then calls the function f with argument arg. Once the + * execution is done, the remote lcore switches to a FINISHED state and + * the return value of f is stored in a local variable to be read using + * rte_eal_wait_lcore(). + * + * The MASTER lcore returns as soon as the message is sent and knows + * nothing about the completion of f. + * + * Note: This function is not designed to offer optimum + * performance. It is just a practical way to launch a function on + * another lcore at initialization time. + * + * @param f + * The function to be called. + * @param arg + * The argument for the function. + * @param slave_id + * The identifier of the lcore on which the function should be executed. + * @return + * - 0: Success. Execution of function f started on the remote lcore. + * - (-EBUSY): The remote lcore is not in a WAIT state. + */ +int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned slave_id); + +/** + * This enum indicates whether the master core must execute the handler + * launched on all logical cores. + */ +enum rte_rmt_call_master_t { + SKIP_MASTER = 0, /**< lcore handler not executed by master core. */ + CALL_MASTER, /**< lcore handler executed by master core. */ +}; + +/** + * Launch a function on all lcores. + * + * Check that each SLAVE lcore is in a WAIT state, then call + * rte_eal_remote_launch() for each lcore. + * + * @param f + * The function to be called. 
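+ *   It must match the lcore_function_t prototype shown above, that is,
+ *   take a single void * argument and return an int.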
+ * @param arg + * The argument for the function. + * @param call_master + * If call_master set to SKIP_MASTER, the MASTER lcore does not call + * the function. If call_master is set to CALL_MASTER, the function + * is also called on master before returning. In any case, the master + * lcore returns as soon as it finished its job and knows nothing + * about the completion of f on the other lcores. + * @return + * - 0: Success. Execution of function f started on all remote lcores. + * - (-EBUSY): At least one remote lcore is not in a WAIT state. In this + * case, no message is sent to any of the lcores. + */ +int rte_eal_mp_remote_launch(lcore_function_t *f, void *arg, + enum rte_rmt_call_master_t call_master); + +/** + * Get the state of the lcore identified by slave_id. + * + * To be executed on the MASTER lcore only. + * + * @param slave_id + * The identifier of the lcore. + * @return + * The state of the lcore. + */ +enum rte_lcore_state_t rte_eal_get_lcore_state(unsigned slave_id); + +/** + * Wait until an lcore finishes its job. + * + * To be executed on the MASTER lcore only. + * + * If the slave lcore identified by the slave_id is in a FINISHED state, + * switch to the WAIT state. If the lcore is in RUNNING state, wait until + * the lcore finishes its job and moves to the FINISHED state. + * + * @param slave_id + * The identifier of the lcore. + * @return + * - 0: If the lcore identified by the slave_id is in a WAIT state. + * - The value that was returned by the previous remote launch + * function call if the lcore identified by the slave_id was in a + * FINISHED or RUNNING state. In this case, it changes the state + * of the lcore to WAIT. + */ +int rte_eal_wait_lcore(unsigned slave_id); + +/** + * Wait until all lcores finish their jobs. + * + * To be executed on the MASTER lcore only. Issue an + * rte_eal_wait_lcore() for every lcore. The return values are + * ignored. + * + * After a call to rte_eal_mp_wait_lcores(), the caller can assume + * that all slave lcores are in a WAIT state. + */ +void rte_eal_mp_wait_lcore(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_LAUNCH_H_ */ diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h new file mode 100644 index 0000000000..f9308c40bc --- /dev/null +++ b/lib/librte_eal/common/include/rte_lcore.h @@ -0,0 +1,191 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_LCORE_H_ +#define _RTE_LCORE_H_ + +/** + * @file + * + * API for lcore and Socket Manipulation. Parts of this are execution + * environment specific. + * + */ +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define LCORE_ID_ANY -1 /**< Any lcore. */ + +RTE_DECLARE_PER_LCORE(unsigned, _lcore_id); /**< Per core "core id". */ + +/** + * Return the ID of the execution unit we are running on. + * @return + * Logical core ID + */ +static inline unsigned +rte_lcore_id(void) +{ + return RTE_PER_LCORE(_lcore_id); +} + +/** + * Get the id of the master lcore + * + * @return + * the id of the master lcore + */ +static inline unsigned +rte_get_master_lcore(void) +{ + return rte_eal_get_configuration()->master_lcore; +} + +/** + * Return the number of execution units (lcores) on the system. + * + * @return + * the number of execution units (lcores) on the system. + */ +static inline unsigned +rte_lcore_count(void) +{ + const struct rte_config *cfg = rte_eal_get_configuration(); + return cfg->lcore_count; +} + +#include + +#ifdef __DOXYGEN__ +/** + * Return the ID of the physical socket of the logical core we are + * running on. + * @return + * Socket ID + */ +static inline unsigned +rte_socket_id(void); + +/** + * Get the ID of the physical socket of the specified lcore + * + * @param lcore_id + * the targeted lcore, which MUST be between 0 and RTE_MAX_LCORE-1. + * @return + * the ID of lcoreid's physical socket + */ +static inline unsigned +rte_lcore_to_socket_id(unsigned lcore_id); + +#endif + + +/** + * Test if an lcore is enabled. + * + * @param lcore_id + * The identifier of the lcore, which MUST be between 0 and + * RTE_MAX_LCORE-1. + * @return + * True if the given lcore is enabled; false otherwise. + */ +static inline int +rte_lcore_is_enabled(unsigned lcore_id) +{ + struct rte_config *cfg = rte_eal_get_configuration(); + if (lcore_id >= RTE_MAX_LCORE) + return 0; + return (cfg->lcore_role[lcore_id] != ROLE_OFF); +} + +/** + * Get the next enabled lcore ID. + * + * @param i + * The current lcore (reference). + * @param skip_master + * If true, do not return the ID of the master lcore. + * @param wrap + * If true, go back to 0 when RTE_MAX_LCORE is reached; otherwise, + * return RTE_MAX_LCORE. + * @return + * The next lcore_id or RTE_MAX_LCORE if not found. + */ +static inline unsigned +rte_get_next_lcore(unsigned i, int skip_master, int wrap) +{ + i++; + if (wrap) + i %= RTE_MAX_LCORE; + + while (i < RTE_MAX_LCORE) { + if (!rte_lcore_is_enabled(i) || + (skip_master && (i == rte_get_master_lcore()))) { + i++; + if (wrap) + i %= RTE_MAX_LCORE; + continue; + } + break; + } + return i; +} +/** + * Macro to browse all running lcores. + */ +#define RTE_LCORE_FOREACH(i) \ + for (i = rte_get_next_lcore(-1, 0, 0); \ + i +#include +#include + +/** The rte_log structure. */ +struct rte_logs { + uint32_t type; /**< Bitfield with enabled logs. */ + uint32_t level; /**< Log level. 
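Only messages whose level is lower than or equal to this value (and to the RTE_LOG_LEVEL build-time option) are output.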
*/ + FILE *file; /**< Pointer to current FILE* for logs. */ +}; + +/** Global log informations */ +extern struct rte_logs rte_logs; + +/* SDK log type */ +#define RTE_LOGTYPE_EAL 0x00000001 /**< Log related to eal. */ +#define RTE_LOGTYPE_MALLOC 0x00000002 /**< Log related to malloc. */ +#define RTE_LOGTYPE_RING 0x00000004 /**< Log related to ring. */ +#define RTE_LOGTYPE_MEMPOOL 0x00000008 /**< Log related to mempool. */ +#define RTE_LOGTYPE_TIMER 0x00000010 /**< Log related to timers. */ +#define RTE_LOGTYPE_PMD 0x00000020 /**< Log related to poll mode driver. */ +#define RTE_LOGTYPE_HASH 0x00000040 /**< Log related to hash table. */ +#define RTE_LOGTYPE_LPM 0x00000080 /**< Log related to LPM. */ + +/* these log types can be used in an application */ +#define RTE_LOGTYPE_USER1 0x01000000 /**< User-defined log type 1. */ +#define RTE_LOGTYPE_USER2 0x02000000 /**< User-defined log type 2. */ +#define RTE_LOGTYPE_USER3 0x04000000 /**< User-defined log type 3. */ +#define RTE_LOGTYPE_USER4 0x08000000 /**< User-defined log type 4. */ +#define RTE_LOGTYPE_USER5 0x10000000 /**< User-defined log type 5. */ +#define RTE_LOGTYPE_USER6 0x20000000 /**< User-defined log type 6. */ +#define RTE_LOGTYPE_USER7 0x40000000 /**< User-defined log type 7. */ +#define RTE_LOGTYPE_USER8 0x80000000 /**< User-defined log type 8. */ + +/* Can't use 0, as it gives compiler warnings */ +#define RTE_LOG_EMERG 1U /**< System is unusable. */ +#define RTE_LOG_ALERT 2U /**< Action must be taken immediately. */ +#define RTE_LOG_CRIT 3U /**< Critical conditions. */ +#define RTE_LOG_ERR 4U /**< Error conditions. */ +#define RTE_LOG_WARNING 5U /**< Warning conditions. */ +#define RTE_LOG_NOTICE 6U /**< Normal but significant condition. */ +#define RTE_LOG_INFO 7U /**< Informational. */ +#define RTE_LOG_DEBUG 8U /**< Debug-level messages. */ + +/** The default log stream. */ +extern FILE *eal_default_log_stream; + +/** + * Change the stream that will be used by the logging system. + * + * This can be done at any time. The f argument represents the stream + * to be used to send the logs. If f is NULL, the default output is + * used, which is the serial line in case of bare metal, or directly + * sent to syslog in case of linux application. + * + * @param f + * Pointer to the stream. + * @return + * - 0 on success. + * - Negative on error. + */ +int rte_openlog_stream(FILE *f); + +/** + * Set the global log level. + * + * After this call, all logs that are lower or equal than level and + * lower or equal than the RTE_LOG_LEVEL configuration option will be + * displayed. + * + * @param level + * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). + */ +void rte_set_log_level(uint32_t level); + +/** + * Enable or disable the log type. + * + * @param type + * Log type, for example, RTE_LOGTYPE_EAL. + * @param enable + * True for enable; false for disable. + */ +void rte_set_log_type(uint32_t type, int enable); + +/** + * Get the current loglevel for the message being processed. + * + * Before calling the user-defined stream for logging, the log + * subsystem sets a per-lcore variable containing the loglevel and the + * logtype of the message being processed. This information can be + * accessed by the user-defined log output function through this + * function. + * + * @return + * The loglevel of the message being processed. + */ +int rte_log_cur_msg_loglevel(void); + +/** + * Get the current logtype for the message being processed. 
+ * + * Before calling the user-defined stream for logging, the log + * subsystem sets a per-lcore variable containing the loglevel and the + * logtype of the message being processed. This information can be + * accessed by the user-defined log output function through this + * function. + * + * @return + * The logtype of the message being processed. + */ +int rte_log_cur_msg_logtype(void); + +/** + * Enable or disable the history (enabled by default) + * + * @param enable + * true to enable, or 0 to disable history. + */ +void rte_log_set_history(int enable); + +/** + * Dump the log history to the console. + */ +void rte_log_dump_history(void); + +/** + * Add a log message to the history. + * + * This function can be called from a user-defined log stream. It adds + * the given message in the history that can be dumped using + * rte_log_dump_history(). + * + * @param buf + * A data buffer containing the message to be saved in the history. + * @param size + * The length of the data buffer. + * @return + * - 0: Success. + * - (-ENOBUFS) if there is no room to store the message. + */ +int rte_log_add_in_history(const char *buf, size_t size); + +/** + * Generates a log message. + * + * The message will be sent in the stream defined by the previous call + * to rte_openlog_stream(). + * + * The level argument determines if the log should be displayed or + * not, depending on the global rte_logs variable. + * + * The preferred alternative is the RTE_LOG() function because debug logs may + * be removed at compilation time if optimization is enabled. Moreover, + * logs are automatically prefixed by type when using the macro. + * + * @param level + * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). + * @param logtype + * The log type, for example, RTE_LOGTYPE_EAL. + * @param format + * The format string, as in printf(3), followed by the variable arguments + * required by the format. + * @return + * - 0: Success. + * - Negative on error. + */ +int rte_log(uint32_t level, uint32_t logtype, const char *format, ...) + __attribute__((format(printf, 3, 4))); + +/** + * Generates a log message. + * + * The message will be sent in the stream defined by the previous call + * to rte_openlog_stream(). + * + * The level argument determines if the log should be displayed or + * not, depending on the global rte_logs variable. A trailing + * newline may be added if needed. + * + * The preferred alternative is the RTE_LOG() because debug logs may be + * removed at compilation time. + * + * @param level + * Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8). + * @param logtype + * The log type, for example, RTE_LOGTYPE_EAL. + * @param format + * The format string, as in printf(3), followed by the variable arguments + * required by the format. + * @param ap + * The va_list of the variable arguments required by the format. + * @return + * - 0: Success. + * - Negative on error. + */ +int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap); + +/** + * Generates a log message. + * + * The RTE_LOG() is equivalent to rte_log() with two differences: + + * - RTE_LOG() can be used to remove debug logs at compilation time, + * depending on RTE_LOG_LEVEL configuration option, and compilation + * optimization level. If optimization is enabled, the tests + * involving constants only are pre-computed. If compilation is done + * with -O0, these tests will be done at run time. 
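+ *   For example, with RTE_LOG_LEVEL set to RTE_LOG_INFO, an
+ *   RTE_LOG(DEBUG, ...) statement is removed entirely by the compiler
+ *   when optimization is enabled.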
+ * - The log level and log type names are smaller, for example: + * RTE_LOG(INFO, EAL, "this is a %s", "log"); + * + * @param l + * Log level. A value between EMERG (1) and DEBUG (8). The short name is + * expanded by the macro, so it cannot be an integer value. + * @param t + * The log type, for example, EAL. The short name is expanded by the + * macro, so it cannot be an integer value. + * @param fmt + * The fmt string, as in printf(3), followed by the variable arguments + * required by the format. + * @param args + * The variable list of arguments according to the format string. + * @return + * - 0: Success. + * - Negative on error. + */ +#define RTE_LOG(l, t, fmt, args...) ({ \ + if ((RTE_LOG_##l <= RTE_LOG_LEVEL) && \ + (RTE_LOG_##l <= rte_logs.level) && \ + (RTE_LOGTYPE_##t & rte_logs.type)) { \ + rte_log(RTE_LOG_##l, RTE_LOGTYPE_##t, \ + #t ": " fmt, ## args); \ + } \ +}) + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_LOG_H_ */ diff --git a/lib/librte_eal/common/include/rte_memcpy.h b/lib/librte_eal/common/include/rte_memcpy.h new file mode 100644 index 0000000000..fd2a296dac --- /dev/null +++ b/lib/librte_eal/common/include/rte_memcpy.h @@ -0,0 +1,355 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_MEMCPY_H_ +#define _RTE_MEMCPY_H_ + +/** + * @file + * + * Functions for SSE implementation of memcpy(). + */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Copy 16 bytes from one location to another using optimised SSE + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. 
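+ *
+ * A minimal usage sketch (the buffer names are illustrative only; both
+ * regions must be at least 16 bytes long and must not overlap):
+ * @code
+ * uint8_t src[16], dst[16];
+ *
+ * rte_mov16(dst, src);
+ * @endcode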
+ */ +static inline void +rte_mov16(uint8_t *dst, const uint8_t *src) +{ + asm volatile ("movdqu (%[src]), %%xmm0\n\t" + "movdqu %%xmm0, (%[dst])\n\t" + : + : [src] "r" (src), + [dst] "r"(dst) + : "xmm0", "memory"); +} + +/** + * Copy 32 bytes from one location to another using optimised SSE + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov32(uint8_t *dst, const uint8_t *src) +{ + asm volatile ("movdqu (%[src]), %%xmm0\n\t" + "movdqu 16(%[src]), %%xmm1\n\t" + "movdqu %%xmm0, (%[dst])\n\t" + "movdqu %%xmm1, 16(%[dst])" + : + : [src] "r" (src), + [dst] "r"(dst) + : "xmm0", "xmm1", "memory"); +} + +/** + * Copy 48 bytes from one location to another using optimised SSE + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov48(uint8_t *dst, const uint8_t *src) +{ + asm volatile ("movdqu (%[src]), %%xmm0\n\t" + "movdqu 16(%[src]), %%xmm1\n\t" + "movdqu 32(%[src]), %%xmm2\n\t" + "movdqu %%xmm0, (%[dst])\n\t" + "movdqu %%xmm1, 16(%[dst])\n\t" + "movdqu %%xmm2, 32(%[dst])" + : + : [src] "r" (src), + [dst] "r"(dst) + : "xmm0", "xmm1", "memory"); +} + +/** + * Copy 64 bytes from one location to another using optimised SSE + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov64(uint8_t *dst, const uint8_t *src) +{ + asm volatile ("movdqu (%[src]), %%xmm0\n\t" + "movdqu 16(%[src]), %%xmm1\n\t" + "movdqu 32(%[src]), %%xmm2\n\t" + "movdqu 48(%[src]), %%xmm3\n\t" + "movdqu %%xmm0, (%[dst])\n\t" + "movdqu %%xmm1, 16(%[dst])\n\t" + "movdqu %%xmm2, 32(%[dst])\n\t" + "movdqu %%xmm3, 48(%[dst])" + : + : [src] "r" (src), + [dst] "r"(dst) + : "xmm0", "xmm1", "xmm2", "xmm3","memory"); +} + +/** + * Copy 128 bytes from one location to another using optimised SSE + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov128(uint8_t *dst, const uint8_t *src) +{ + asm volatile ("movdqu (%[src]), %%xmm0\n\t" + "movdqu 16(%[src]), %%xmm1\n\t" + "movdqu 32(%[src]), %%xmm2\n\t" + "movdqu 48(%[src]), %%xmm3\n\t" + "movdqu 64(%[src]), %%xmm4\n\t" + "movdqu 80(%[src]), %%xmm5\n\t" + "movdqu 96(%[src]), %%xmm6\n\t" + "movdqu 112(%[src]), %%xmm7\n\t" + "movdqu %%xmm0, (%[dst])\n\t" + "movdqu %%xmm1, 16(%[dst])\n\t" + "movdqu %%xmm2, 32(%[dst])\n\t" + "movdqu %%xmm3, 48(%[dst])\n\t" + "movdqu %%xmm4, 64(%[dst])\n\t" + "movdqu %%xmm5, 80(%[dst])\n\t" + "movdqu %%xmm6, 96(%[dst])\n\t" + "movdqu %%xmm7, 112(%[dst])" + : + : [src] "r" (src), + [dst] "r"(dst) + : "xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7", "memory"); +} + +/** + * Copy 256 bytes from one location to another using optimised SSE + * instructions. The locations should not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + */ +static inline void +rte_mov256(uint8_t *dst, const uint8_t *src) +{ + /* + * There are 16XMM registers, but this function does not use + * them all so that it can still be compiled as 32bit + * code. The performance increase was neglible if all 16 + * registers were used. 
+ */ + rte_mov128(dst, src); + rte_mov128(dst + 128, src + 128); +} + +#ifdef RTE_MEMCPY_BUILTIN_CONSTANT_P +/** + * Choose between compiler built-in implementation of memcpy or DPDK + * implementation depending if size is a compile-time constant + */ +#define rte_memcpy(dst, src, n) \ + (__builtin_constant_p (n) ? \ + memcpy(dst, src, n) : rte_memcpy_func(dst, src, n)) +#else +/** + * Always use DPDK implementation. + */ +#define rte_memcpy rte_memcpy_func +#endif + +/** + * Copy bytes from one location to another. The locations must not overlap. + * + * @param dst + * Pointer to the destination of the data. + * @param src + * Pointer to the source data. + * @param n + * Number of bytes to copy. + * @return + * Pointer to the destination data. + */ +static inline void * +rte_memcpy_func(void *dst, const void *src, size_t n) +{ + void *ret = dst; + + /* We can't copy < 16 bytes using XMM registers so do it manually. */ + if (n < 16) { + if (n & 0x01) { + *(uint8_t *)dst = *(const uint8_t *)src; + dst = (uint8_t *)dst + 1; + src = (const uint8_t *)src + 1; + } + if (n & 0x02) { + *(uint16_t *)dst = *(const uint16_t *)src; + dst = (uint16_t *)dst + 1; + src = (const uint16_t *)src + 1; + } + if (n & 0x04) { + /* + * NOTE: doing this as a 32bit copy causes "strict + * aliasing" compile errors, but worked fine for 64bit + * copy below, for unknown reasons. + */ + *(uint16_t *)dst = *(const uint16_t *)src; + *((uint16_t *)dst + 1) = *((const uint16_t *)src + 1); + dst = (uint32_t *)dst + 1; + src = (const uint32_t *)src + 1; + } + if (n & 0x08) { + *(uint64_t *)dst = *(const uint64_t *)src; + } + return ret; + } + + /* Special fast cases for <= 128 bytes */ + if (n <= 32) { + rte_mov16((uint8_t *)dst, (const uint8_t *)src); + rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n); + return ret; + } + + if (n <= 64) { + rte_mov32((uint8_t *)dst, (const uint8_t *)src); + rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n); + return ret; + } + + if (n <= 128) { + rte_mov64((uint8_t *)dst, (const uint8_t *)src); + rte_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n); + return ret; + } + + /* + * For large copies > 128 bytes. This combination of 256, 64 and 16 byte + * copies was found to be faster than doing 128 and 32 byte copies as + * well. + */ + for ( ; n >= 256; n -= 256) { + rte_mov256((uint8_t *)dst, (const uint8_t *)src); + dst = (uint8_t *)dst + 256; + src = (const uint8_t *)src + 256; + } + + /* + * We split the remaining bytes (which will be less than 256) into + * 64byte (2^6) chunks. + * Using incrementing integers in the case labels of a switch statement + * enourages the compiler to use a jump table. To get incrementing + * integers, we shift the 2 relevant bits to the LSB position to first + * get decrementing integers, and then subtract. + */ + switch (3 - (n >> 6)) { + case 0x00: + rte_mov64((uint8_t *)dst, (const uint8_t *)src); + n -= 64; + dst = (uint8_t *)dst + 64; + src = (const uint8_t *)src + 64; /* fallthrough */ + case 0x01: + rte_mov64((uint8_t *)dst, (const uint8_t *)src); + n -= 64; + dst = (uint8_t *)dst + 64; + src = (const uint8_t *)src + 64; /* fallthrough */ + case 0x02: + rte_mov64((uint8_t *)dst, (const uint8_t *)src); + n -= 64; + dst = (uint8_t *)dst + 64; + src = (const uint8_t *)src + 64; /* fallthrough */ + default: + ; + } + + /* + * We split the remaining bytes (which will be less than 64) into + * 16byte (2^4) chunks, using the same switch structure as above. 
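+ *
+ * For example, if n is 40 when this switch is reached: n >> 4 is 2, so
+ * 3 - 2 = 1 and cases 0x01 and 0x02 run, copying 32 bytes and leaving
+ * n = 8 for the final overlapping rte_mov16() tail copy below.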
+ */ + switch (3 - (n >> 4)) { + case 0x00: + rte_mov16((uint8_t *)dst, (const uint8_t *)src); + n -= 16; + dst = (uint8_t *)dst + 16; + src = (const uint8_t *)src + 16; /* fallthrough */ + case 0x01: + rte_mov16((uint8_t *)dst, (const uint8_t *)src); + n -= 16; + dst = (uint8_t *)dst + 16; + src = (const uint8_t *)src + 16; /* fallthrough */ + case 0x02: + rte_mov16((uint8_t *)dst, (const uint8_t *)src); + n -= 16; + dst = (uint8_t *)dst + 16; + src = (const uint8_t *)src + 16; /* fallthrough */ + default: + ; + } + + /* Copy any remaining bytes, without going beyond end of buffers */ + if (n != 0) { + rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n); + } + return ret; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MEMCPY_H_ */ diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h new file mode 100644 index 0000000000..bf843dcf5e --- /dev/null +++ b/lib/librte_eal/common/include/rte_memory.h @@ -0,0 +1,143 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_MEMORY_H_ +#define _RTE_MEMORY_H_ + +/** + * @file + * + * Memory-related RTE API. + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +enum rte_page_sizes { + RTE_PGSIZE_4K = 1 << 12, + RTE_PGSIZE_2M = RTE_PGSIZE_4K << 9, + RTE_PGSIZE_1G = RTE_PGSIZE_2M <<9 +}; + +#define SOCKET_ID_ANY -1 /**< Any NUMA socket. */ +#define CACHE_LINE_SIZE 64 /**< Cache line size. */ +#define CACHE_LINE_MASK (CACHE_LINE_SIZE-1) /**< Cache line mask. */ + +#define CACHE_LINE_ROUNDUP(size) \ + (CACHE_LINE_SIZE * ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE)) +/**< Return the first cache-aligned value greater or equal to size. */ + +/** + * Force alignment to cache line. 
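+ *
+ * For example (the structure name is illustrative only):
+ * @code
+ * struct worker_stats {
+ *     uint64_t rx_pkts;
+ *     uint64_t tx_pkts;
+ * } __rte_cache_aligned;
+ * @endcode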
+ */ +#define __rte_cache_aligned __attribute__((__aligned__(CACHE_LINE_SIZE))) + +#ifndef __KERNEL__ /* so we can include this header in kernel modules */ +typedef uint64_t phys_addr_t; /**< Physical address definition. */ +#endif + +/** + * Physical memory segment descriptor. + */ +struct rte_memseg { + phys_addr_t phys_addr; /**< Start physical address. */ + union { + void *addr; /**< Start virtual address. */ + uint64_t addr_64; /**< Makes sure addr is always 64 bits */ + }; + uint64_t len; /**< Length of the segment. */ + uint64_t hugepage_sz; /**< The pagesize of underlying memory */ + int32_t socket_id; /**< NUMA socket ID. */ + uint32_t nchannel; /**< Number of channels. */ + uint32_t nrank; /**< Number of ranks. */ +} __attribute__((__packed__)); + + +/** + * Get the layout of the available physical memory. + * + * It can be useful for an application to have the full physical + * memory layout to decide the size of a memory zone to reserve. This + * table is stored in rte_config (see rte_eal_get_configuration()). + * + * @return + * - On success, return a pointer to a read-only table of struct + * rte_physmem_desc elements, containing the layout of all + * addressable physical memory. The last element of the table + * contains a NULL address. + * - On error, return NULL. This should not happen since it is a fatal + * error that will probably cause the entire system to panic. + */ +const struct rte_memseg *rte_eal_get_physmem_layout(void); + +/** + * Dump the physical memory layout to the console. + */ +void rte_dump_physmem_layout(void); + +/** + * Get the total amount of available physical memory. + * + * @return + * The total amount of available physical memory in bytes. + */ +uint64_t rte_eal_get_physmem_size(void); + +/** + * Get the number of memory channels. + * + * @return + * The number of memory channels on the system. The value is 0 if unknown + * or not the same on all devices. + */ +unsigned rte_memory_get_nchannel(void); + +/** + * Get the number of memory ranks. + * + * @return + * The number of memory ranks on the system. The value is 0 if unknown or + * not the same on all devices. + */ +unsigned rte_memory_get_nrank(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MEMORY_H_ */ diff --git a/lib/librte_eal/common/include/rte_memzone.h b/lib/librte_eal/common/include/rte_memzone.h new file mode 100644 index 0000000000..02da3dbcef --- /dev/null +++ b/lib/librte_eal/common/include/rte_memzone.h @@ -0,0 +1,200 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_MEMZONE_H_ +#define _RTE_MEMZONE_H_ + +/** + * @file + * RTE Memzone + * + * The goal of the memzone allocator is to reserve contiguous + * portions of physical memory. These zones are identified by a name. + * + * The memzone descriptors are shared by all partitions and are + * located in a known place of physical memory. This zone is accessed + * using rte_eal_get_configuration(). The lookup (by name) of a + * memory zone can be done in any partition and returns the same + * physical address. + * + * A reserved memory zone cannot be unreserved. The reservation shall + * be done at initialization time only. + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTE_MEMZONE_2MB 0x00000001 /**< Use 2MB pages. */ +#define RTE_MEMZONE_1GB 0x00000002 /**< Use 1GB pages. */ +#define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004 /**< Use available page size */ + +/** + * A structure describing a memzone, which is a contiguous portion of + * physical memory identified by a name. + */ +struct rte_memzone { + +#define RTE_MEMZONE_NAMESIZE 32 /**< Maximum length of memory zone name.*/ + char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the memory zone. */ + + phys_addr_t phys_addr; /**< Start physical address. */ + union { + void *addr; /**< Start virtual address. */ + uint64_t addr_64; /**< Makes sure addr is always 64-bits */ + }; + uint64_t len; /**< Length of the memzone. */ + + uint64_t hugepage_sz; /**< The page size of underlying memory */ + + int32_t socket_id; /**< NUMA socket ID. */ + + uint32_t flags; /**< Characteristics of this memzone. */ +} __attribute__((__packed__)); + +/** + * Reserve a portion of physical memory. + * + * This function reserves some memory and returns a pointer to a + * correctly filled memzone descriptor. If the allocation cannot be + * done, return NULL. Note: A reserved zone cannot be freed. + * + * @param name + * The name of the memzone. If it already exists, the function will + * fail and return NULL. + * @param len + * The size of the memory to be reserved. If it + * is 0, the biggest contiguous zone will be reserved. + * @param socket_id + * The socket identifier in the case of + * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA + * constraint for the reserved zone. + * @param flags + * The flags parameter is used to request memzones to be + * taken from 1GB or 2MB hugepages. + * - RTE_MEMZONE_2MB - Reserve from 2MB pages + * - RTE_MEMZONE_1GB - Reserve from 1GB pages + * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if + * the requested page size is unavailable. + * If this flag is not set, the function + * will return error on an unavailable size + * request. 
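+ *
+ * A minimal usage sketch (the zone name and size are illustrative only):
+ * @code
+ * const struct rte_memzone *mz;
+ *
+ * mz = rte_memzone_reserve("example_zone", 1024, SOCKET_ID_ANY, 0);
+ * if (mz == NULL)
+ *     return -1;   // rte_errno holds the failure reason
+ * @endcode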
+ * @return + * A pointer to a correctly-filled read-only memzone descriptor, or NULL + * on error. + * On error case, rte_errno will be set appropriately: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + * - EINVAL - invalid parameters + */ +const struct rte_memzone *rte_memzone_reserve(const char *name, + uint64_t len, int socket_id, + unsigned flags); + +/** + * Reserve a portion of physical memory with alignment on a specified + * boundary. + * + * This function reserves some memory with alignment on a specified + * boundary, and returns a pointer to a correctly filled memzone + * descriptor. If the allocation cannot be done or if the alignment + * is not a power of 2, returns NULL. + * Note: A reserved zone cannot be freed. + * + * @param name + * The name of the memzone. If it already exists, the function will + * fail and return NULL. + * @param len + * The size of the memory to be reserved. If it + * is 0, the biggest contiguous zone will be reserved. + * @param align + * Alignment for resulting memzone. Must be a power of 2. + * @param socket_id + * The socket identifier in the case of + * NUMA. The value can be SOCKET_ID_ANY if there is no NUMA + * constraint for the reserved zone. + * @param flags + * The flags parameter is used to request memzones to be + * taken from 1GB or 2MB hugepages. + * - RTE_MEMZONE_2MB - Reserve from 2MB pages + * - RTE_MEMZONE_1GB - Reserve from 1GB pages + * - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if + * the requested page size is unavailable. + * If this flag is not set, the function + * will return error on an unavailable size + * request. + * @return + * A pointer to a correctly-filled read-only memzone descriptor, or NULL + * on error. + * On error case, rte_errno will be set appropriately: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + * - EINVAL - invalid parameters + */ +const struct rte_memzone *rte_memzone_reserve_aligned(const char *name, + uint64_t len, int socket_id, unsigned flags, + unsigned align); + +/** + * Lookup for a memzone. + * + * Get a pointer to a descriptor of an already reserved memory + * zone identified by the name given as an argument. + * + * @param name + * The name of the memzone. + * @return + * A pointer to a read-only memzone descriptor. + */ +const struct rte_memzone *rte_memzone_lookup(const char *name); + +/** + * Dump all reserved memzones to the console. + */ +void rte_memzone_dump(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MEMZONE_H_ */ diff --git a/lib/librte_eal/common/include/rte_pci.h b/lib/librte_eal/common/include/rte_pci.h new file mode 100644 index 0000000000..f2128b5431 --- /dev/null +++ b/lib/librte_eal/common/include/rte_pci.h @@ -0,0 +1,197 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_PCI_H_ +#define _RTE_PCI_H_ + +/** + * @file + * + * RTE PCI Interface + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +TAILQ_HEAD(pci_device_list, rte_pci_device); /**< PCI devices in D-linked Q. */ +TAILQ_HEAD(pci_driver_list, rte_pci_driver); /**< PCI drivers in D-linked Q. */ + +extern struct pci_driver_list driver_list; /**< Global list of PCI drivers. */ +extern struct pci_device_list device_list; /**< Global list of PCI devices. */ + +/** Pathname of PCI devices directory. */ +#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices" + +/** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */ +#define PCI_PRI_FMT "%.4"PRIx16":%.2"PRIx8":%.2"PRIx8".%"PRIx8 + +/** Nb. of values in PCI device identifier format string. */ +#define PCI_FMT_NVAL 4 + +/** Nb. of values in PCI resource format. */ +#define PCI_RESOURCE_FMT_NVAL 3 + +/** + * A structure describing a PCI resource. + */ +struct rte_pci_resource { + uint64_t phys_addr; /**< Physical address, 0 if no resource. */ + uint64_t len; /**< Length of the resource. */ + void *addr; /**< Virtual address, NULL when not mapped. */ +}; + +/** Maximum number of PCI resources. */ +#define PCI_MAX_RESOURCE 7 + +/** + * A structure describing an ID for a PCI driver. Each driver provides a + * table of these IDs for each device that it supports. + */ +struct rte_pci_id { + uint16_t vendor_id; /**< Vendor ID or PCI_ANY_ID. */ + uint16_t device_id; /**< Device ID or PCI_ANY_ID. */ + uint16_t subsystem_vendor_id; /**< Subsystem vendor ID or PCI_ANY_ID. */ + uint16_t subsystem_device_id; /**< Subsystem device ID or PCI_ANY_ID. */ +}; + +/** + * A structure describing the location of a PCI device. + */ +struct rte_pci_addr { + uint16_t domain; /**< Device domain */ + uint8_t bus; /**< Device bus */ + uint8_t devid; /**< Device ID */ + uint8_t function; /**< Device function. */ +}; + +/** + * A structure describing a PCI device. 
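+ *
+ * Devices discovered on the PCI bus are kept in the global device_list
+ * declared above and can be walked with the standard TAILQ macros, for
+ * example (illustrative only):
+ * @code
+ * struct rte_pci_device *dev;
+ *
+ * TAILQ_FOREACH(dev, &device_list, next)
+ *     printf(PCI_PRI_FMT "\n", dev->addr.domain, dev->addr.bus,
+ *            dev->addr.devid, dev->addr.function);
+ * @endcode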
+ */ +struct rte_pci_device { + TAILQ_ENTRY(rte_pci_device) next; /**< Next probed PCI device. */ + struct rte_pci_addr addr; /**< PCI location. */ + struct rte_pci_id id; /**< PCI ID. */ + struct rte_pci_resource mem_resource; /**< PCI Memory Resource */ + struct rte_intr_handle intr_handle; /**< Interrupt handle */ +}; + +/** Any PCI device identifier (vendor, device, ...) */ +#define PCI_ANY_ID (0xffff) + +#ifdef __cplusplus +/** C++ macro used to help building up tables of device IDs */ +#define RTE_PCI_DEVICE(vend, dev) \ + (vend), \ + (dev), \ + PCI_ANY_ID, \ + PCI_ANY_ID +#else +/** Macro used to help building up tables of device IDs */ +#define RTE_PCI_DEVICE(vend, dev) \ + .vendor_id = (vend), \ + .device_id = (dev), \ + .subsystem_vendor_id = PCI_ANY_ID, \ + .subsystem_device_id = PCI_ANY_ID +#endif + +struct rte_pci_driver; + +/** + * Initialisation function for the driver called during PCI probing. + */ +typedef int (pci_devinit_t)(struct rte_pci_driver *, struct rte_pci_device *); + +/** + * A structure describing a PCI driver. + */ +struct rte_pci_driver { + TAILQ_ENTRY(rte_pci_driver) next; /**< Next in list. */ + const char *name; /**< Driver name. */ + pci_devinit_t *devinit; /**< Device init. function. */ + struct rte_pci_id *id_table; /**< ID table, NULL terminated. */ + uint32_t drv_flags; /**< Flags contolling handling of device. */ +}; + +/**< Device needs igb_uio kernel module */ +#define RTE_PCI_DRV_NEED_IGB_UIO 0x0001 + +/** + * Probe the PCI bus for registered drivers. + * + * Scan the content of the PCI bus, and call the probe() function for + * all registered drivers that have a matching entry in its id_table + * for discovered devices. + * + * @return + * - 0 on success. + * - Negative on error. + */ +int rte_eal_pci_probe(void); + +/** + * Dump the content of the PCI bus. + */ +void rte_eal_pci_dump(void); + +/** + * Register a PCI driver. + * + * @param driver + * A pointer to a rte_pci_driver structure describing the driver + * to be registered. + */ +void rte_eal_pci_register(struct rte_pci_driver *driver); + +/** + * Register a list of PCI locations that will be blacklisted (not used by DPDK). + * + * @param blacklist + * List of PCI device addresses that will not be used by DPDK. + * @param size + * Number of items in the list. + */ +void rte_eal_pci_set_blacklist(struct rte_pci_addr *blacklist, unsigned size); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_PCI_H_ */ diff --git a/lib/librte_eal/common/include/rte_pci_dev_ids.h b/lib/librte_eal/common/include/rte_pci_dev_ids.h new file mode 100644 index 0000000000..402d21f6bc --- /dev/null +++ b/lib/librte_eal/common/include/rte_pci_dev_ids.h @@ -0,0 +1,205 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/** + * @file + * + * This file contains a list of the PCI device IDs recognised by DPDK, which + * can be used to fill out an array of structures describing the devices. + * + * Currently two families of devices are recognised: those supported by the + * IGB driver, and those supported by the IXGBE driver. The inclusion of these + * in an array built using this file depends on the definition of + * RTE_LIBRTE_IGB_PMD and RTE_LIBRTE_IXGBE_PMD at the time when this file is + * included. + * + * In order to populate an array, the user of this file must define this macro: + * RTE_PCI_DEV_ID_DECL(vendorID, deviceID). For example: + * + * @code + * struct device { + * int vend; + * int dev; + * }; + * + * struct device devices[] = { + * #define RTE_PCI_DEV_ID_DECL(vendorID, deviceID) {vend, dev}, + * #include + * }; + * @endcode + * + * Note that this file can be included multiple times within the same file. + */ + +#ifndef RTE_PCI_DEV_ID_DECL +#error "You must define RTE_PCI_DEV_ID_DECL before including rte_pci_dev_ids.h" +#endif + +#ifndef PCI_VENDOR_ID_INTEL +/** Vendor ID used by Intel devices */ +#define PCI_VENDOR_ID_INTEL 0x8086 +#endif + +/******************** Physical IGB devices from e1000_hw.h ********************/ +#ifdef RTE_LIBRTE_IGB_PMD + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I350_DA4 0x1546 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_FIBER) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_SERDES) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_QUAD_COPPER) 
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_NS) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_NS_SERDES) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_SERDES_QUAD) + +/* This device is the on-board NIC on some development boards. */ +#ifdef RTE_PCI_DEV_USE_82575EB_COPPER +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82575EB_COPPER) +#endif + +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER) + +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_COPPER) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_FIBER) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_SERDES) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_SGMII) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_COPPER_DUAL) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_QUAD_FIBER) + +/* This device is the on-board NIC on some development boards. */ +#ifndef RTE_PCI_DEV_NO_USE_I350_COPPER +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_COPPER) +#endif + +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_FIBER) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_SERDES) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_SGMII) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_DA4) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SGMII) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SERDES) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SFP) + +#endif /* RTE_LIBRTE_IGB_PMD */ + + +/****************** Physical IXGBE devices from ixgbe_type.h ******************/ +#ifdef RTE_LIBRTE_IXGBE_PMD + +#define IXGBE_DEV_ID_82598 0x10B6 +#define IXGBE_DEV_ID_82598_BX 0x1508 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT2 0x150B +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +#define IXGBE_DEV_ID_82599_KX4 0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 +#define IXGBE_DEV_ID_82599_KR 0x1517 +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +#define IXGBE_DEV_ID_82599_CX4 0x10F9 +#define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +#define IXGBE_DEV_ID_X540T 0x1528 + +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2) 
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_SFP) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_FCOE) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599EN_SFP) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM) +RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T) + +#endif /* RTE_LIBRTE_IXGBE_PMD */ diff --git a/lib/librte_eal/common/include/rte_per_lcore.h b/lib/librte_eal/common/include/rte_per_lcore.h new file mode 100644 index 0000000000..08627dd8ee --- /dev/null +++ b/lib/librte_eal/common/include/rte_per_lcore.h @@ -0,0 +1,81 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_PER_LCORE_H_ +#define _RTE_PER_LCORE_H_ + +/** + * @file + * + * Per-lcore variables in RTE + * + * This file defines an API for instantiating per-lcore "global + * variables" that are environment-specific. Note that in all + * environments, a "shared variable" is the default when you use a + * global variable. + * + * Parts of this are execution environment specific. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#ifdef __DOXYGEN__ +/** + * Macro to define a per lcore variable "var" of type "type", don't + * use keywords like "static" or "volatile" in type, just prefix the + * whole macro. + */ +#define RTE_DEFINE_PER_LCORE(type, name) + +/** + * Macro to declare an extern per lcore variable "var" of type "type" + */ +#define RTE_DECLARE_PER_LCORE(type, name) + +/** + * Read/write the per-lcore variable value + */ +#define RTE_PER_LCORE(name) +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_PER_LCORE_H_ */ diff --git a/lib/librte_eal/common/include/rte_prefetch.h b/lib/librte_eal/common/include/rte_prefetch.h new file mode 100644 index 0000000000..0d2160287f --- /dev/null +++ b/lib/librte_eal/common/include/rte_prefetch.h @@ -0,0 +1,90 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_PREFETCH_H_ +#define _RTE_PREFETCH_H_ + +/** + * @file + * + * Prefetch operations. + * + * This file defines an API for prefetch macros / inline-functions, + * which are architecture-dependent. Prefetching occurs when a + * processor requests an instruction or data from memory to cache + * before it is actually needed, potentially speeding up the execution of the + * program. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Prefetch a cache line into all cache levels. 
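+ *
+ * A minimal usage sketch; the packet-parsing context below is illustrative
+ * and not defined in this file:
+ *
+ * @code
+ * char *hdr = pkt_data;             // hypothetical pointer to packet data
+ * rte_prefetch0(hdr);               // request the line in all cache levels
+ * // ... do other independent work while the line is being fetched ...
+ * parse_headers(hdr);               // hypothetical consumer of the data
+ * @endcode
+ *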
+ * @param p + * Address to prefetch + */ +static inline void rte_prefetch0(volatile void *p) +{ + asm volatile ("prefetcht0 %[p]" : [p] "+m" (*(volatile char *)p)); +} + +/** + * Prefetch a cache line into all cache levels except the 0th cache level. + * @param p + * Address to prefetch + */ +static inline void rte_prefetch1(volatile void *p) +{ + asm volatile ("prefetcht1 %[p]" : [p] "+m" (*(volatile char *)p)); +} + +/** + * Prefetch a cache line into all cache levels except the 0th and 1th cache + * levels. + * @param p + * Address to prefetch + */ +static inline void rte_prefetch2(volatile void *p) +{ + asm volatile ("prefetcht2 %[p]" : [p] "+m" (*(volatile char *)p)); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_PREFETCH_H_ */ diff --git a/lib/librte_eal/common/include/rte_random.h b/lib/librte_eal/common/include/rte_random.h new file mode 100644 index 0000000000..a86906f370 --- /dev/null +++ b/lib/librte_eal/common/include/rte_random.h @@ -0,0 +1,93 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_RANDOM_H_ +#define _RTE_RANDOM_H_ + +/** + * @file + * + * Pseudo-random Generators in RTE + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +/** + * Seed the pseudo-random generator. + * + * The generator is automatically seeded by the EAL init with a timer + * value. It may need to be re-seeded by the user with a real random + * value. + * + * @param seedval + * The value of the seed. + */ +static inline void +rte_srand(uint64_t seedval) +{ + srand48((long unsigned int)seedval); +} + +/** + * Get a pseudo-random value. + * + * This function generates pseudo-random numbers using the linear + * congruential algorithm and 48-bit integer arithmetic, called twice + * to generate a 64-bit value. + * + * @return + * A pseudo-random value between 0 and (1<<64)-1. 
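+ *
+ * A short usage sketch; the entropy source shown is hypothetical:
+ *
+ * @code
+ * uint64_t seed = get_hw_entropy();  // hypothetical source of real randomness
+ * rte_srand(seed);                   // re-seed the generator
+ * uint64_t id = rte_rand();          // draw a 64-bit pseudo-random value
+ * @endcode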
+ */ +static inline uint64_t +rte_rand(void) +{ + uint64_t val; + val = lrand48(); + val <<= 32; + val += lrand48(); + return val; +} + +#ifdef __cplusplus +} +#endif + + +#endif /* _RTE_PER_LCORE_H_ */ diff --git a/lib/librte_eal/common/include/rte_rwlock.h b/lib/librte_eal/common/include/rte_rwlock.h new file mode 100644 index 0000000000..a0b5e01d9a --- /dev/null +++ b/lib/librte_eal/common/include/rte_rwlock.h @@ -0,0 +1,174 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_RWLOCK_H_ +#define _RTE_RWLOCK_H_ + +/** + * @file + * + * RTE Read-Write Locks + * + * This file defines an API for read-write locks. The lock is used to + * protect data that allows multiple readers in parallel, but only + * one writer. All readers are blocked until the writer is finished + * writing. + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +/** + * The rte_rwlock_t type. + * + * cnt is -1 when write lock is held, and > 0 when read locks are held. + */ +typedef struct { + volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. */ +} rte_rwlock_t; + +/** + * A static rwlock initializer. + */ +#define RTE_RWLOCK_INITIALIZER { 0 } + +/** + * Initialize the rwlock to an unlocked state. + * + * @param rwl + * A pointer to the rwlock structure. + */ +static inline void +rte_rwlock_init(rte_rwlock_t *rwl) +{ + rwl->cnt = 0; +} + +/** + * Take a read lock. Loop until the lock is held. + * + * @param rwl + * A pointer to a rwlock structure. + */ +static inline void +rte_rwlock_read_lock(rte_rwlock_t *rwl) +{ + int32_t x; + int success = 0; + + while (success == 0) { + x = rwl->cnt; + /* write lock is held */ + if (x < 0) { + rte_pause(); + continue; + } + success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt, + x, x + 1); + } +} + +/** + * Release a read lock. + * + * @param rwl + * A pointer to the rwlock structure. 
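+ *
+ * Sketch of a typical reader critical section; the protected table and
+ * lookup routine are illustrative:
+ *
+ * @code
+ * static rte_rwlock_t tbl_lock = RTE_RWLOCK_INITIALIZER;
+ *
+ * rte_rwlock_read_lock(&tbl_lock);
+ * val = table_lookup(tbl, key);      // hypothetical read-only access
+ * rte_rwlock_read_unlock(&tbl_lock);
+ * @endcode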
+ */ +static inline void +rte_rwlock_read_unlock(rte_rwlock_t *rwl) +{ + /* in debug mode, we should check that rwl->cnt is > 0 */ + + /* same than atomic32_dec */ + asm volatile(MPLOCKED + "decl %[cnt]" + : [cnt] "=m" (rwl->cnt) /* output (0) */ + : "m" (rwl->cnt) /* input (1) */ + ); /* no clobber-list */ +} + +/** + * Take a write lock. Loop until the lock is held. + * + * @param rwl + * A pointer to a rwlock structure. + */ +static inline void +rte_rwlock_write_lock(rte_rwlock_t *rwl) +{ + int32_t x; + int success = 0; + + while (success == 0) { + x = rwl->cnt; + /* a lock is held */ + if (x != 0) { + rte_pause(); + continue; + } + success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt, + 0, -1); + } +} + +/** + * Release a write lock. + * + * @param rwl + * A pointer to a rwlock structure. + */ +static inline void +rte_rwlock_write_unlock(rte_rwlock_t *rwl) +{ + /* in debug mode, we should check that rwl->cnt is < 0 */ + + /* same than atomic32_inc */ + asm volatile(MPLOCKED + "incl %[cnt]" + : [cnt] "=m" (rwl->cnt) /* output (0) */ + : "m" (rwl->cnt) /* input (1) */ + ); /* no clobber-list */ +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_RWLOCK_H_ */ diff --git a/lib/librte_eal/common/include/rte_spinlock.h b/lib/librte_eal/common/include/rte_spinlock.h new file mode 100644 index 0000000000..7961809c87 --- /dev/null +++ b/lib/librte_eal/common/include/rte_spinlock.h @@ -0,0 +1,243 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_SPINLOCK_H_ +#define _RTE_SPINLOCK_H_ + +/** + * @file + * + * RTE Spinlocks + * + * This file defines an API for read-write locks, which are implemented + * in an architecture-specific way. This kind of lock simply waits in + * a loop repeatedly checking until the lock becomes available. + * + * All locks must be initialised before use, and only initialised once. 
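+ *
+ * A minimal usage sketch; the shared counter being protected is illustrative:
+ *
+ * @code
+ * static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;
+ * static volatile uint64_t shared_cnt;
+ *
+ * rte_spinlock_lock(&lock);
+ * shared_cnt++;                      // keep the critical section short
+ * rte_spinlock_unlock(&lock);
+ * @endcode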
+ * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * The rte_spinlock_t type. + */ +typedef struct { + volatile int locked; /**< lock status 0 = unlocked, 1 = locked */ +} rte_spinlock_t; + +/** + * A static spinlock initializer. + */ +#define RTE_SPINLOCK_INITIALIZER { 0 } + +/** + * Initialize the spinlock to an unlocked state. + * + * @param sl + * A pointer to the spinlock. + */ +static inline void +rte_spinlock_init(rte_spinlock_t *sl) +{ + sl->locked = 0; +} + +/** + * Take the spinlock. + * + * @param sl + * A pointer to the spinlock. + */ +static inline void +rte_spinlock_lock(rte_spinlock_t *sl) +{ + int lock_val = 1; + asm volatile ( + "1:\n" + "xchg %[locked], %[lv]\n" + "test %[lv], %[lv]\n" + "jz 3f\n" + "2:\n" + "pause\n" + "cmp $0, %[locked]\n" + "jnz 2b\n" + "jmp 1b\n" + "3:\n" + : [locked] "=m" (sl->locked), [lv] "=q" (lock_val) + : "[lv]" (lock_val) + : "memory"); +} + +/** + * Release the spinlock. + * + * @param sl + * A pointer to the spinlock. + */ +static inline void +rte_spinlock_unlock (rte_spinlock_t *sl) +{ + int unlock_val = 0; + asm volatile ( + "xchg %[locked], %[ulv]\n" + : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val) + : "[ulv]" (unlock_val) + : "memory"); +} + +/** + * Try to take the lock. + * + * @param sl + * A pointer to the spinlock. + * @return + * 1 if the lock is successfully taken; 0 otherwise. + */ +static inline int +rte_spinlock_trylock (rte_spinlock_t *sl) +{ + int lockval = 1; + + asm volatile ( + "xchg %[locked], %[lockval]" + : [locked] "=m" (sl->locked), [lockval] "=q" (lockval) + : "[lockval]" (lockval) + : "memory"); + + return (lockval == 0); +} + +/** + * Test if the lock is taken. + * + * @param sl + * A pointer to the spinlock. + * @return + * 1 if the lock is currently taken; 0 otherwise. + */ +static inline int rte_spinlock_is_locked (rte_spinlock_t *sl) +{ + return sl->locked; +} + +/** + * The rte_spinlock_recursive_t type. + */ +typedef struct { + rte_spinlock_t sl; /**< the actual spinlock */ + volatile int user; /**< core id using lock, -1 for unused */ + volatile int count; /**< count of time this lock has been called */ +} rte_spinlock_recursive_t; + +/** + * A static recursive spinlock initializer. + */ +#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0} + +/** + * Initialize the recursive spinlock to an unlocked state. + * + * @param slr + * A pointer to the recursive spinlock. + */ +static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr) +{ + rte_spinlock_init(&slr->sl); + slr->user = -1; + slr->count = 0; +} + +/** + * Take the recursive spinlock. + * + * @param slr + * A pointer to the recursive spinlock. + */ +static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr) +{ + int id = rte_lcore_id(); + + if (slr->user != id) { + rte_spinlock_lock(&slr->sl); + slr->user = id; + } + slr->count++; +} +/** + * Release the recursive spinlock. + * + * @param slr + * A pointer to the recursive spinlock. + */ +static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr) +{ + if (--(slr->count) == 0) { + slr->user = -1; + rte_spinlock_unlock(&slr->sl); + } + +} + +/** + * Try to take the recursive lock. + * + * @param slr + * A pointer to the recursive spinlock. + * @return + * 1 if the lock is successfully taken; 0 otherwise. 
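+ *
+ * Sketch of non-blocking use; the error handling shown is illustrative:
+ *
+ * @code
+ * static rte_spinlock_recursive_t slr = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
+ *
+ * if (rte_spinlock_recursive_trylock(&slr) == 0)
+ *     return -1;                     // busy on another core, caller retries later
+ * do_protected_work();               // may take &slr again on the same core
+ * rte_spinlock_recursive_unlock(&slr);
+ * @endcode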
+ */ +static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr) +{ + int id = rte_lcore_id(); + + if (slr->user != id) { + if (rte_spinlock_trylock(&slr->sl) == 0) + return 0; + slr->user = id; + } + slr->count++; + return 1; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_SPINLOCK_H_ */ diff --git a/lib/librte_eal/common/include/rte_string_fns.h b/lib/librte_eal/common/include/rte_string_fns.h new file mode 100644 index 0000000000..da3a3c99e8 --- /dev/null +++ b/lib/librte_eal/common/include/rte_string_fns.h @@ -0,0 +1,165 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/** + * @file + * + * Definitions of warnings for use of various insecure functions + */ + +#ifndef _RTE_STRING_FNS_H_ +#define _RTE_STRING_FNS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + +/** + * Safer version of snprintf that writes up to buflen characters to + * the output buffer and ensures that the resultant string is null-terminated, + * that is, it writes at most buflen-1 actual string characters to buffer. The + * return value is the number of characters which should be written to the + * buffer, so string truncation can be detected by the caller by checking if + * the return value is greater than or equal to the buflen. + * + * @param buffer + * The buffer into which the output is to be written + * + * @param buflen + * The size of the output buffer + * + * @param format + * The format string to be printed to the buffer + * + * @return + * The number of characters written to the buffer, or if the string has been + * truncated, the number of characters which would have been written had the + * buffer been sufficiently big. + * + */ +static inline int +rte_snprintf(char *buffer, int buflen, const char *format, ...) 
+{ + int len; + va_list ap; + + if (buffer == NULL && buflen != 0) + goto einval_error; + if (format == NULL) { + if (buflen > 0) + buffer[0] = '\0'; + goto einval_error; + } + + va_start(ap, format); + len = vsnprintf(buffer, buflen, format, ap); + va_end(ap); + if (len >= buflen && buflen > 0) + buffer[buflen - 1] = '\0'; + + return len; + +einval_error: + errno = EINVAL; + return -1; +} + + +/** + * Takes string "string" parameter and splits it at character "delim" + * up to maxtokens-1 times - to give "maxtokens" resulting tokens. Like + * strtok or strsep functions, this modifies its input string, by replacing + * instances of "delim" with '\0'. All resultant tokens are returned in the + * "tokens" array which must have enough entries to hold "maxtokens". + * + * @param string + * The input string to be split into tokens + * + * @param stringlen + * The max length of the input buffer + * + * @param tokens + * The array to hold the pointers to the tokens in the string + * + * @param maxtokens + * The number of elements in the tokens array. At most, maxtokens-1 splits + * of the string will be done. + * + * @param delim + * The character on which the split of the data will be done + * + * @return + * The number of tokens in the tokens array. + */ +static inline int +rte_strsplit(char *string, int stringlen, + char **tokens, int maxtokens, char delim) +{ + int i, tok = 0; + int tokstart = 1; /* first token is right at start of string */ + + if (string == NULL || tokens == NULL) + goto einval_error; + + for (i = 0; i < stringlen; i++) { + if (string[i] == '\0' || tok >= maxtokens) + break; + if (tokstart) { + tokstart = 0; + tokens[tok++] = &string[i]; + } + if (string[i] == delim) { + string[i] = '\0'; + tokstart = 1; + } + } + return tok; + +einval_error: + errno = EINVAL; + return -1; +} + +#ifdef __cplusplus +} +#endif + + +#endif /* RTE_STRING_FNS_H */ diff --git a/lib/librte_eal/common/include/rte_tailq.h b/lib/librte_eal/common/include/rte_tailq.h new file mode 100644 index 0000000000..db13013b5b --- /dev/null +++ b/lib/librte_eal/common/include/rte_tailq.h @@ -0,0 +1,146 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_TAILQ_H_ +#define _RTE_TAILQ_H_ + +/** + * @file + * + */ + + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#ifndef __KERNEL__ +/** dummy structure type used by the rte_tailq APIs */ +struct rte_dummy { + TAILQ_ENTRY(rte_dummy) next; /**< Pointer entries for a tailq list */ +}; +/** dummy */ +TAILQ_HEAD(rte_dummy_head, rte_dummy); + +#define RTE_TAILQ_NAMESIZE 32 + +/** + * The structure defining a tailq header entry for storing + * in the rte_config structure in shared memory. Each tailq + * is identified by name. + * Any library storing a set of objects e.g. rings, mempools, hash-tables, + * is recommended to use an entry here, so as to make it easy for + * a multi-process app to find already-created elements in shared memory. + */ +struct rte_tailq_head { + struct rte_dummy_head tailq_head; /**< NOTE: must be first element */ + char qname[RTE_TAILQ_NAMESIZE]; /**< Queue name */ +}; +#else +struct rte_tailq_head {}; +#endif + +/** + * Utility macro to make reserving a tailqueue for a particular struct easier. + * + * @param name + * The name to be given to the tailq - used by lookup to find it later + * + * @param struct_name + * The name of the list type we are using. (Generally this is the same as the + * first parameter passed to TAILQ_HEAD macro) + * + * @return + * The return value from rte_eal_tailq_reserve, typecast to the appropriate + * structure pointer type. + * NULL on error, since the tailq_head is the first + * element in the rte_tailq_head structure. + */ +#define RTE_TAILQ_RESERVE(name, struct_name) \ + (struct struct_name *)(&rte_eal_tailq_reserve(name)->tailq_head) + +/** + * Utility macro to make looking up a tailqueue for a particular struct easier. + * + * @param name + * The name of the tailq + * + * @param struct_name + * The name of the list type we are using. (Generally this is the same as the + * first parameter passed to TAILQ_HEAD macro) + * + * @return + * The return value from rte_eal_tailq_lookup, typecast to the appropriate + * structure pointer type. + * NULL on error, since the tailq_head is the first + * element in the rte_tailq_head structure. + */ +#define RTE_TAILQ_LOOKUP(name, struct_name) \ + (struct struct_name *)(&rte_eal_tailq_lookup(name)->tailq_head) + +/** + * Reserve a slot in the tailq list for a particular tailq header + * Note: this function, along with rte_tailq_lookup, is not multi-thread safe, + * and both these functions should only be called from a single thread at a time + * + * @param name + * The name to be given to the tail queue. + * @return + * A pointer to the newly reserved tailq entry + */ +struct rte_tailq_head *rte_eal_tailq_reserve(const char *name); + +/** + * Lookup for a tail queue. + * + * Get a pointer to a tail queue header of an already reserved tail + * queue identified by the name given as an argument. 
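+ *
+ * Sketch of the intended pattern (the list type and queue name are
+ * illustrative; the TAILQ_HEAD type is assumed to be declared elsewhere):
+ *
+ * @code
+ * struct my_obj_list *list;
+ * list = RTE_TAILQ_LOOKUP("MY_OBJECTS", my_obj_list);
+ * if (list == NULL)                  // not found: reserve it (illustrative handling)
+ *     list = RTE_TAILQ_RESERVE("MY_OBJECTS", my_obj_list);
+ * @endcode
+ *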
+ * Note: this function, along with rte_tailq_reserve, is not multi-thread safe, + * and both these functions should only be called from a single thread at a time + * + * @param name + * The name of the queue. + * @return + * A pointer to the tail queue head structure. + */ +struct rte_tailq_head *rte_eal_tailq_lookup(const char *name); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_TAILQ_H_ */ diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h new file mode 100644 index 0000000000..b29c1d3aab --- /dev/null +++ b/lib/librte_eal/common/include/rte_version.h @@ -0,0 +1,85 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/** + * @file + * Definitions of Intel(R) DPDK version numbers + */ + +#ifndef _RTE_VERSION_H_ +#define _RTE_VERSION_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +/** + * Major version number i.e. the x in x.y.z + */ +#define RTE_VER_MAJOR 1 + +/** + * Minor version number i.e. the y in x.y.z + */ +#define RTE_VER_MINOR 2 + +/** + * Patch level number i.e. the z in x.y.z + */ +#define RTE_VER_PATCH_LEVEL 3 + +#define RTE_VER_PREFIX "RTE" + +/** + * Function returning string of version number: "RTE x.y.z" + * @return + * string + */ +static inline const char * +rte_version(void) { + return RTE_VER_PREFIX" " + RTE_STR(RTE_VER_MAJOR)"." + RTE_STR(RTE_VER_MINOR)"." + RTE_STR(RTE_VER_PATCH_LEVEL); +} + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_VERSION_H */ diff --git a/lib/librte_eal/common/include/rte_warnings.h b/lib/librte_eal/common/include/rte_warnings.h new file mode 100644 index 0000000000..eb003204e4 --- /dev/null +++ b/lib/librte_eal/common/include/rte_warnings.h @@ -0,0 +1,88 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/** + * @file + * Definitions of warnings for use of various insecure functions + */ + +#ifndef _RTE_WARNINGS_H_ +#define _RTE_WARNINGS_H_ + +#ifdef RTE_INSECURE_FUNCTION_WARNING + +/* we need to include all used standard header files so that they appear + * _before_ we poison the function names. + */ + +#include +#include +#include +#include +#include +#include + +/* rte_snprintf uses snprintf, so include its definition before we poison the + * functions, otherwise we'll get an error in it. */ +#include + +/* the following function are deemed not fully secure for use e.g. they + * do not always null-terminate arguments */ +#pragma GCC poison sprintf strtok snprintf vsnprintf +#pragma GCC poison strlen strcpy strcat +#pragma GCC poison sscanf + +/* other unsafe functions may be implemented as macros so just undef them */ +#ifdef strsep +#undef strsep +#else +#pragma GCC poison strsep +#endif + +#ifdef strncpy +#undef strncpy +#else +#pragma GCC poison strncpy +#endif + +#ifdef strncat +#undef strncat +#else +#pragma GCC poison strncat +#endif + +#endif + +#endif /* RTE_WARNINGS_H */ diff --git a/lib/librte_eal/common/include/x86_64/arch/rte_atomic.h b/lib/librte_eal/common/include/x86_64/arch/rte_atomic.h new file mode 100644 index 0000000000..a335c7f582 --- /dev/null +++ b/lib/librte_eal/common/include/x86_64/arch/rte_atomic.h @@ -0,0 +1,943 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Inspired from FreeBSD src/sys/amd64/include/atomic.h + * Copyright (c) 1998 Doug Rabson + * All rights reserved. + */ + +#ifndef _RTE_ATOMIC_H_ +#error "don't include this file directly, please include generic " +#endif + +#ifndef _RTE_X86_64_ATOMIC_H_ +#define _RTE_X86_64_ATOMIC_H_ + +/** + * @file + * Atomic Operations on x86_64 + */ + +#if RTE_MAX_LCORE == 1 +#define MPLOCKED /**< No need to insert MP lock prefix. */ +#else +#define MPLOCKED "lock ; " /**< Insert MP lock prefix. */ +#endif + +/** + * General memory barrier. + * + * Guarantees that the LOAD and STORE operations generated before the + * barrier occur before the LOAD and STORE operations generated after. + */ +#define rte_mb() asm volatile("mfence;" : : : "memory") + +/** + * Write memory barrier. + * + * Guarantees that the STORE operations generated before the barrier + * occur before the STORE operations generated after. + */ +#define rte_wmb() asm volatile("sfence;" : : : "memory") + +/** + * Read memory barrier. + * + * Guarantees that the LOAD operations generated before the barrier + * occur before the LOAD operations generated after. + */ +#define rte_rmb() asm volatile("lfence;" : : : "memory") + +/*------------------------- 16 bit atomic operations -------------------------*/ + +/** + * Atomic compare and set. + * + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 16-bit words) + * + * @param dst + * The destination location into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src) +{ + uint8_t res; + + asm volatile( + MPLOCKED + "cmpxchgw %[src], %[dst];" + "sete %[res];" + : [res] "=a" (res), /* output */ + [dst] "=m" (*dst) + : [src] "r" (src), /* input */ + "a" (exp), + "m" (*dst) + : "memory"); /* no-clobber list */ + return res; +} + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int16_t cnt; /**< An internal counter value. */ +} rte_atomic16_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC16_INIT(val) { (val) } + +/** + * Initialize an atomic counter. 
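+ *
+ * Run-time initialisation sketch (the counter name is illustrative); a
+ * static counter can use RTE_ATOMIC16_INIT instead:
+ *
+ * @code
+ * rte_atomic16_t refcnt;
+ * rte_atomic16_init(&refcnt);        // counter starts at 0
+ * rte_atomic16_set(&refcnt, 1);      // first reference
+ * @endcode
+ *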
+ * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_init(rte_atomic16_t *v) +{ + v->cnt = 0; +} + +/** + * Atomically read a 16-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int16_t +rte_atomic16_read(const rte_atomic16_t *v) +{ + return v->cnt; +} + +/** + * Atomically set a counter to a 16-bit value. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value for the counter. + */ +static inline void +rte_atomic16_set(rte_atomic16_t *v, int16_t new_value) +{ + v->cnt = new_value; +} + +/** + * Atomically add a 16-bit value to an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic16_add(rte_atomic16_t *v, int16_t inc) +{ + asm volatile( + MPLOCKED + "addw %[inc], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [inc] "ir" (inc), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically subtract a 16-bit value from an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic16_sub(rte_atomic16_t *v, int16_t dec) +{ + asm volatile( + MPLOCKED + "subw %[dec], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [dec] "ir" (dec), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically increment a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_inc(rte_atomic16_t *v) +{ + asm volatile( + MPLOCKED + "incw %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Atomically decrement a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic16_dec(rte_atomic16_t *v) +{ + asm volatile( + MPLOCKED + "decw %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Atomically add a 16-bit value to a counter and return the result. + * + * Atomically adds the 16-bits value (inc) to the atomic counter (v) and + * returns the value of v after addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. + */ +static inline int16_t +rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc) +{ + int16_t prev = inc; + + asm volatile( + MPLOCKED + "xaddw %[prev], %[cnt]" + : [prev] "+r" (prev), /* output */ + [cnt] "=m" (v->cnt) + : "m" (v->cnt) /* input */ + ); + return (int16_t)(prev + inc); +} + +/** + * Atomically subtract a 16-bit value from a counter and return + * the result. + * + * Atomically subtracts the 16-bit value (inc) from the atomic counter + * (v) and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int16_t +rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec) +{ + return rte_atomic16_add_return(v, (int16_t)-dec); +} + +/** + * Atomically increment a 16-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. 
+ * @return + * True if the result after the increment operation is 0; false otherwise. + */ +static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v) +{ + uint8_t ret; + + asm volatile( + MPLOCKED + "incw %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + return (ret != 0); +} + +/** + * Atomically decrement a 16-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the decrement operation is 0; false otherwise. + */ +static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v) +{ + uint8_t ret; + + asm volatile(MPLOCKED + "decw %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + return (ret != 0); +} + +/** + * Atomically test and set a 16-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int rte_atomic16_test_and_set(rte_atomic16_t *v) +{ + return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1); +} + +/** + * Atomically set a 16-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void rte_atomic16_clear(rte_atomic16_t *v) +{ + v->cnt = 0; +} + +/*------------------------- 32 bit atomic operations -------------------------*/ + +/** + * Atomic compare and set. + * + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 32-bit words) + * + * @param dst + * The destination location into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src) +{ + uint8_t res; + + asm volatile( + MPLOCKED + "cmpxchgl %[src], %[dst];" + "sete %[res];" + : [res] "=a" (res), /* output */ + [dst] "=m" (*dst) + : [src] "r" (src), /* input */ + "a" (exp), + "m" (*dst) + : "memory"); /* no-clobber list */ + return res; +} + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int32_t cnt; /**< An internal counter value. */ +} rte_atomic32_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC32_INIT(val) { (val) } + +/** + * Initialize an atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_init(rte_atomic32_t *v) +{ + v->cnt = 0; +} + +/** + * Atomically read a 32-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int32_t +rte_atomic32_read(const rte_atomic32_t *v) +{ + return v->cnt; +} + +/** + * Atomically set a counter to a 32-bit value. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value for the counter. + */ +static inline void +rte_atomic32_set(rte_atomic32_t *v, int32_t new_value) +{ + v->cnt = new_value; +} + +/** + * Atomically add a 32-bit value to an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. 
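+ *
+ * Sketch: updating a statistics counter shared between lcores; the packet
+ * counter and nb_rx value are illustrative:
+ *
+ * @code
+ * static rte_atomic32_t rx_pkts = RTE_ATOMIC32_INIT(0);
+ *
+ * rte_atomic32_add(&rx_pkts, (int32_t)nb_rx);  // nb_rx from a hypothetical RX burst
+ * @endcode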
+ */ +static inline void +rte_atomic32_add(rte_atomic32_t *v, int32_t inc) +{ + asm volatile( + MPLOCKED + "addl %[inc], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [inc] "ir" (inc), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically subtract a 32-bit value from an atomic counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic32_sub(rte_atomic32_t *v, int32_t dec) +{ + asm volatile( + MPLOCKED + "subl %[dec], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [dec] "ir" (dec), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically increment a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_inc(rte_atomic32_t *v) +{ + asm volatile( + MPLOCKED + "incl %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Atomically decrement a counter by one. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic32_dec(rte_atomic32_t *v) +{ + asm volatile( + MPLOCKED + "decl %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Atomically add a 32-bit value to a counter and return the result. + * + * Atomically adds the 32-bits value (inc) to the atomic counter (v) and + * returns the value of v after addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. + */ +static inline int32_t +rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc) +{ + int32_t prev = inc; + + asm volatile( + MPLOCKED + "xaddl %[prev], %[cnt]" + : [prev] "+r" (prev), /* output */ + [cnt] "=m" (v->cnt) + : "m" (v->cnt) /* input */ + ); + return (int32_t)(prev + inc); +} + +/** + * Atomically subtract a 32-bit value from a counter and return + * the result. + * + * Atomically subtracts the 32-bit value (inc) from the atomic counter + * (v) and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int32_t +rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec) +{ + return rte_atomic32_add_return(v, -dec); +} + +/** + * Atomically increment a 32-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the increment operation is 0; false otherwise. + */ +static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v) +{ + uint8_t ret; + + asm volatile( + MPLOCKED + "incl %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + return (ret != 0); +} + +/** + * Atomically decrement a 32-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the decrement operation is 0; false otherwise. 
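+ *
+ * Typical reference-count sketch; the object and its free routine are
+ * illustrative:
+ *
+ * @code
+ * if (rte_atomic32_dec_and_test(&obj->refcnt))
+ *     obj_free(obj);                 // last reference dropped
+ * @endcode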
+ */ +static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v) +{ + uint8_t ret; + + asm volatile(MPLOCKED + "decl %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + return (ret != 0); +} + +/** + * Atomically test and set a 32-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int rte_atomic32_test_and_set(rte_atomic32_t *v) +{ + return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1); +} + +/** + * Atomically set a 32-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void rte_atomic32_clear(rte_atomic32_t *v) +{ + v->cnt = 0; +} + +/*------------------------- 64 bit atomic operations -------------------------*/ + +/** + * An atomic compare and set function used by the mutex functions. + * (atomic) equivalent to: + * if (*dst == exp) + * *dst = src (all 64-bit words) + * + * @param dst + * The destination into which the value will be written. + * @param exp + * The expected value. + * @param src + * The new value. + * @return + * Non-zero on success; 0 on failure. + */ +static inline int +rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src) +{ + uint8_t res; + + asm volatile( + MPLOCKED + "cmpxchgq %[src], %[dst];" + "sete %[res];" + : [res] "=a" (res), /* output */ + [dst] "=m" (*dst) + : [src] "r" (src), /* input */ + "a" (exp), + "m" (*dst) + : "memory"); /* no-clobber list */ + + return res; +} + +/** + * The atomic counter structure. + */ +typedef struct { + volatile int64_t cnt; /**< Internal counter value. */ +} rte_atomic64_t; + +/** + * Static initializer for an atomic counter. + */ +#define RTE_ATOMIC64_INIT(val) { (val) } + +/** + * Initialize the atomic counter. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_init(rte_atomic64_t *v) +{ + v->cnt = 0; +} + +/** + * Atomically read a 64-bit counter. + * + * @param v + * A pointer to the atomic counter. + * @return + * The value of the counter. + */ +static inline int64_t +rte_atomic64_read(rte_atomic64_t *v) +{ + return v->cnt; +} + +/** + * Atomically set a 64-bit counter. + * + * @param v + * A pointer to the atomic counter. + * @param new_value + * The new value of the counter. + */ +static inline void +rte_atomic64_set(rte_atomic64_t *v, int64_t new_value) +{ + v->cnt = new_value; +} + +/** + * Atomically add a 64-bit value to a counter. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + */ +static inline void +rte_atomic64_add(rte_atomic64_t *v, int64_t inc) +{ + asm volatile( + MPLOCKED + "addq %[inc], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [inc] "ir" (inc), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically subtract a 64-bit value from a counter. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + */ +static inline void +rte_atomic64_sub(rte_atomic64_t *v, int64_t dec) +{ + asm volatile( + MPLOCKED + "subq %[dec], %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : [dec] "ir" (dec), /* input */ + "m" (v->cnt) + ); +} + +/** + * Atomically increment a 64-bit counter by one and test. + * + * @param v + * A pointer to the atomic counter. 
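+ *
+ * Sketch: counters that can exceed 32 bits; the frame/byte counters and
+ * pkt_len value are illustrative:
+ *
+ * @code
+ * static rte_atomic64_t rx_frames = RTE_ATOMIC64_INIT(0);
+ * static rte_atomic64_t rx_bytes  = RTE_ATOMIC64_INIT(0);
+ *
+ * rte_atomic64_inc(&rx_frames);            // one more frame received
+ * rte_atomic64_add(&rx_bytes, pkt_len);    // pkt_len from a hypothetical RX path
+ * @endcode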
+ */ +static inline void +rte_atomic64_inc(rte_atomic64_t *v) +{ + asm volatile( + MPLOCKED + "incq %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Atomically decrement a 64-bit counter by one and test. + * + * @param v + * A pointer to the atomic counter. + */ +static inline void +rte_atomic64_dec(rte_atomic64_t *v) +{ + asm volatile( + MPLOCKED + "decq %[cnt]" + : [cnt] "=m" (v->cnt) /* output */ + : "m" (v->cnt) /* input */ + ); +} + +/** + * Add a 64-bit value to an atomic counter and return the result. + * + * Atomically adds the 64-bit value (inc) to the atomic counter (v) and + * returns the value of v after the addition. + * + * @param v + * A pointer to the atomic counter. + * @param inc + * The value to be added to the counter. + * @return + * The value of v after the addition. + */ +static inline int64_t +rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc) +{ + int64_t prev = inc; + + asm volatile( + MPLOCKED + "xaddq %[prev], %[cnt]" + : [prev] "+r" (prev), /* output */ + [cnt] "=m" (v->cnt) + : "m" (v->cnt) /* input */ + ); + return prev + inc; +} + +/** + * Subtract a 64-bit value from an atomic counter and return the result. + * + * Atomically subtracts the 64-bit value (dec) from the atomic counter (v) + * and returns the value of v after the subtraction. + * + * @param v + * A pointer to the atomic counter. + * @param dec + * The value to be subtracted from the counter. + * @return + * The value of v after the subtraction. + */ +static inline int64_t +rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec) +{ + return rte_atomic64_add_return(v, -dec); +} + +/** + * Atomically increment a 64-bit counter by one and test. + * + * Atomically increments the atomic counter (v) by one and returns + * true if the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after the addition is 0; false otherwise. + */ +static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v) +{ + uint8_t ret; + + asm volatile( + MPLOCKED + "incq %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + + return ret != 0; +} + +/** + * Atomically decrement a 64-bit counter by one and test. + * + * Atomically decrements the atomic counter (v) by one and returns true if + * the result is 0, or false in all other cases. + * + * @param v + * A pointer to the atomic counter. + * @return + * True if the result after subtraction is 0; false otherwise. + */ +static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v) +{ + uint8_t ret; + + asm volatile( + MPLOCKED + "decq %[cnt] ; " + "sete %[ret]" + : [cnt] "+m" (v->cnt), /* output */ + [ret] "=qm" (ret) + ); + return ret != 0; +} + +/** + * Atomically test and set a 64-bit atomic counter. + * + * If the counter value is already set, return 0 (failed). Otherwise, set + * the counter value to 1 and return 1 (success). + * + * @param v + * A pointer to the atomic counter. + * @return + * 0 if failed; else 1, success. + */ +static inline int rte_atomic64_test_and_set(rte_atomic64_t *v) +{ + return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1); +} + +/** + * Atomically set a 64-bit counter to 0. + * + * @param v + * A pointer to the atomic counter. 
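+ *
+ * Sketch of a one-shot flag built on test_and_set/clear; the init routine
+ * is illustrative:
+ *
+ * @code
+ * static rte_atomic64_t once;        // zero-initialised
+ *
+ * if (rte_atomic64_test_and_set(&once))
+ *     do_one_time_init();            // only the first caller gets here
+ * // ...
+ * rte_atomic64_clear(&once);         // re-arm the flag if ever needed
+ * @endcode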
+ */ +static inline void rte_atomic64_clear(rte_atomic64_t *v) +{ + v->cnt = 0; +} + +#endif /* _RTE_X86_64_ATOMIC_H_ */ diff --git a/lib/librte_eal/linuxapp/Makefile b/lib/librte_eal/linuxapp/Makefile new file mode 100644 index 0000000000..17b422206d --- /dev/null +++ b/lib/librte_eal/linuxapp/Makefile @@ -0,0 +1,39 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += igb_uio +DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile new file mode 100644 index 0000000000..c600cbfc14 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/Makefile @@ -0,0 +1,91 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +LIB = librte_eal.a + +VPATH += $(RTE_SDK)/lib/librte_eal/common + +CFLAGS += -I$(SRCDIR)/include +CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include +CFLAGS += -I$(RTE_SDK)/lib/librte_ring +CFLAGS += -I$(RTE_SDK)/lib/librte_mempool +CFLAGS += -I$(RTE_SDK)/lib/librte_malloc +CFLAGS += -I$(RTE_SDK)/lib/librte_ether +CFLAGS += $(WERROR_FLAGS) -O3 + +# specific to linuxapp exec-env +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) := eal.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_hugepage_info.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_memory.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_thread.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_log.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_debug.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_lcore.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_hpet.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_interrupts.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_alarm.c + +# from common dir +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_memzone.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_log.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_launch.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_pci.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_memory.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_tailqs.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_errno.c +SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_cpuflags.c + +CFLAGS_eal.o := -D_GNU_SOURCE +CFLAGS_eal_thread.o := -D_GNU_SOURCE +CFLAGS_eal_log.o := -D_GNU_SOURCE +CFLAGS_eal_common_log.o := -D_GNU_SOURCE + +# workaround for a gcc bug with noreturn attribute +# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603 +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_eal_thread.o += -Wno-return-type +CFLAGS_eal_hpet.o += -Wno-return-type +endif + +INC := rte_per_lcore.h rte_lcore.h rte_interrupts.h + +SYMLINK-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP)-include/exec-env := \ + $(addprefix include/exec-env/,$(INC)) + +DEPDIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += lib/librte_eal/common + +include $(RTE_SDK)/mk/rte.lib.mk + diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c new file mode 100644 index 0000000000..8d82cc3b72 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal.c @@ -0,0 +1,620 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" +#include "eal_thread.h" +#include "eal_internal_cfg.h" +#include "eal_fs_paths.h" +#include "eal_hugepages.h" + +#define OPT_HUGE_DIR "huge-dir" +#define OPT_PROC_TYPE "proc-type" +#define OPT_NO_SHCONF "no-shconf" +#define OPT_NO_HPET "no-hpet" +#define OPT_NO_PCI "no-pci" +#define OPT_NO_HUGE "no-huge" +#define OPT_FILE_PREFIX "file-prefix" + +#define RTE_EAL_BLACKLIST_SIZE 0x100 + +#define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL) + +#define GET_BLACKLIST_FIELD(in, fd, lim, dlm) \ +{ \ + unsigned long val; \ + char *end; \ + errno = 0; \ + val = strtoul((in), &end, 16); \ + if (errno != 0 || end[0] != (dlm) || val > (lim)) \ + return (-EINVAL); \ + (fd) = (typeof (fd))val; \ + (in) = end + 1; \ +} + +/* early configuration structure, when memory config is not mmapped */ +static struct rte_mem_config early_mem_config; + +/* define fd variable here, because file needs to be kept open for the + * duration of the program, as we hold a write lock on it in the primary proc */ +static int mem_cfg_fd = -1; + +static struct flock wr_lock = { + .l_type = F_WRLCK, + .l_whence = SEEK_SET, + .l_start = offsetof(struct rte_mem_config, memseg), + .l_len = sizeof(early_mem_config.memseg), +}; + +/* Address of global and public configuration */ +static struct rte_config rte_config = { + .mem_config = &early_mem_config, +}; + +static struct rte_pci_addr eal_dev_blacklist[RTE_EAL_BLACKLIST_SIZE]; + +/* internal configuration (per-core) */ +struct lcore_config lcore_config[RTE_MAX_LCORE]; + +/* internal configuration */ +struct internal_config internal_config; + +/* Return a pointer to the configuration structure */ +struct rte_config * +rte_eal_get_configuration(void) +{ + return &rte_config; +} + +/* create memory configuration in shared/mmap memory. Take out + * a write lock on the memsegs, so we can auto-detect primary/secondary. 
+ * This means we never close the file while running (auto-close on exit). + * We also don't lock the whole file, so that in future we can use read-locks + * on other parts, e.g. memzones, to detect if there are running secondary + * processes. */ +static void +rte_eal_config_create(void) +{ + void *rte_mem_cfg_addr; + int retval; + + const char *pathname = eal_runtime_config_path(); + + if (internal_config.no_shconf) + return; + + if (mem_cfg_fd < 0){ + mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660); + if (mem_cfg_fd < 0) + rte_panic("Cannot open '%s' for rte_mem_config\n", pathname); + } + + retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config)); + if (retval < 0){ + close(mem_cfg_fd); + rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname); + } + + retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock); + if (retval < 0){ + close(mem_cfg_fd); + rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary " + "process running?\n", pathname); + } + + rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config), + PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0); + + if (rte_mem_cfg_addr == MAP_FAILED){ + rte_panic("Cannot mmap memory for rte_config\n"); + } + rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr; + memcpy(rte_config.mem_config, &early_mem_config, + sizeof(early_mem_config)); +} + +/* attach to an existing shared memory config */ +static void +rte_eal_config_attach(void) +{ + void *rte_mem_cfg_addr; + const char *pathname = eal_runtime_config_path(); + + if (internal_config.no_shconf) + return; + + if (mem_cfg_fd < 0){ + mem_cfg_fd = open(pathname, O_RDONLY); + if (mem_cfg_fd < 0) + rte_panic("Cannot open '%s' for rte_mem_config\n", pathname); + } + + rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config), PROT_READ, + MAP_SHARED, mem_cfg_fd, 0); + close(mem_cfg_fd); + if (rte_mem_cfg_addr == MAP_FAILED) + rte_panic("Cannot mmap memory for rte_config\n"); + + rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr; +} + +/* Detect if we are a primary or a secondary process */ +static enum rte_proc_type_t +eal_proc_type_detect(void) +{ + enum rte_proc_type_t ptype = RTE_PROC_PRIMARY; + const char *pathname = eal_runtime_config_path(); + + /* if we can open the file but not get a write-lock we are a secondary + * process. NOTE: if we get a file handle back, we keep that open + * and don't close it to prevent a race condition between multiple opens */ + if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) && + (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0)) + ptype = RTE_PROC_SECONDARY; + + RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n", + ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY"); + + return ptype; +} + +/* Sets up rte_config structure with the pointer to shared memory config.*/ +static void +rte_config_init(void) +{ + /* set the magic in configuration structure */ + rte_config.magic = RTE_MAGIC; + rte_config.process_type = (internal_config.process_type == RTE_PROC_AUTO) ? 
+ eal_proc_type_detect() : /* for auto, detect the type */ + internal_config.process_type; /* otherwise use what's already set */ + + switch (rte_config.process_type){ + case RTE_PROC_PRIMARY: + rte_eal_config_create(); + break; + case RTE_PROC_SECONDARY: + rte_eal_config_attach(); + break; + case RTE_PROC_AUTO: + case RTE_PROC_INVALID: + rte_panic("Invalid process type\n"); + } +} + +/* display usage */ +static void +eal_usage(const char *prgname) +{ + printf("\nUsage: %s -c COREMASK -n NUM [-m NB] [-r NUM] [-b ]" + "[--proc-type primary|secondary|auto] \n\n" + "EAL options:\n" + " -c COREMASK: A hexadecimal bitmask of cores to run on\n" + " -n NUM : Number of memory channels\n" + " -v : Display version information on startup\n" + " -b : to prevent EAL from using specified PCI device\n" + " (multiple -b options are alowed)\n" + " -m MB : memory to allocate (default = size of hugemem)\n" + " -r NUM : force number of memory ranks (don't detect)\n" + " --"OPT_HUGE_DIR" : directory where hugetlbfs is mounted\n" + " --"OPT_PROC_TYPE": type of this process\n" + " --"OPT_FILE_PREFIX": prefix for hugepage filenames\n" + "\nEAL options for DEBUG use only:\n" + " --"OPT_NO_HUGE" : use malloc instead of hugetlbfs\n" + " --"OPT_NO_PCI" : disable pci\n" + " --"OPT_NO_HPET" : disable hpet\n" + " --"OPT_NO_SHCONF": no shared config (mmap'd files)\n\n", + prgname); +} + +/* + * Parse the coremask given as argument (hexadecimal string) and fill + * the global configuration (core role and core count) with the parsed + * value. + */ +static int +eal_parse_coremask(const char *coremask) +{ + struct rte_config *cfg = rte_eal_get_configuration(); + unsigned i; + char *end = NULL; + unsigned long long cm; + unsigned count = 0; + + /* parse hexadecimal string */ + cm = strtoull(coremask, &end, 16); + if ((coremask[0] == '\0') || (end == NULL) || (*end != '\0') || (cm == 0)) + return -1; + + RTE_LOG(DEBUG, EAL, "coremask set to %llx\n", cm); + /* set core role and core count */ + for (i = 0; i < RTE_MAX_LCORE; i++) { + if ((1ULL << i) & cm) { + if (count == 0) + cfg->master_lcore = i; + cfg->lcore_role[i] = ROLE_RTE; + count++; + } + else { + cfg->lcore_role[i] = ROLE_OFF; + } + } + return 0; +} + +static inline uint64_t +eal_get_hugepage_mem_size(void) +{ + uint64_t size = 0; + unsigned i; + + for (i = 0; i < internal_config.num_hugepage_sizes; i++){ + struct hugepage_info *hpi = &internal_config.hugepage_info[i]; + if (hpi->hugedir != NULL) + size += hpi->hugepage_sz * hpi->num_pages; + } + + return (size); +} + +static enum rte_proc_type_t +eal_parse_proc_type(const char *arg) +{ + if (strncasecmp(arg, "primary", sizeof("primary")) == 0) + return RTE_PROC_PRIMARY; + if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0) + return RTE_PROC_SECONDARY; + if (strncasecmp(arg, "auto", sizeof("auto")) == 0) + return RTE_PROC_AUTO; + + return RTE_PROC_INVALID; +} + +static int +eal_parse_blacklist(const char *input, struct rte_pci_addr *dev2bl) +{ + GET_BLACKLIST_FIELD(input, dev2bl->domain, UINT16_MAX, ':'); + GET_BLACKLIST_FIELD(input, dev2bl->bus, UINT8_MAX, ':'); + GET_BLACKLIST_FIELD(input, dev2bl->devid, UINT8_MAX, '.'); + GET_BLACKLIST_FIELD(input, dev2bl->function, UINT8_MAX, 0); + return (0); +} + +static ssize_t +eal_parse_blacklist_opt(const char *optarg, size_t idx) +{ + if (idx >= sizeof (eal_dev_blacklist) / sizeof (eal_dev_blacklist[0])) { + RTE_LOG(ERR, EAL, + "%s - too many devices to blacklist...\n", + optarg); + return (-EINVAL); + } else if (eal_parse_blacklist(optarg, eal_dev_blacklist + 
idx) != 0) { + RTE_LOG(ERR, EAL, + "%s - invalid device to blacklist...\n", + optarg); + return (-EINVAL); + } + + idx += 1; + return (idx); +} + + +/* Parse the argument given in the command line of the application */ +static int +eal_parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + int coremask_ok = 0; + ssize_t blacklist_index = 0;; + char *prgname = argv[0]; + static struct option lgopts[] = { + {OPT_NO_HUGE, 0, 0, 0}, + {OPT_NO_PCI, 0, 0, 0}, + {OPT_NO_HPET, 0, 0, 0}, + {OPT_HUGE_DIR, 1, 0, 0}, + {OPT_NO_SHCONF, 0, 0, 0}, + {OPT_PROC_TYPE, 1, 0, 0}, + {OPT_FILE_PREFIX, 1, 0, 0}, + {0, 0, 0, 0} + }; + + argvopt = argv; + + internal_config.memory = 0; + internal_config.force_nrank = 0; + internal_config.force_nchannel = 0; + internal_config.hugefile_prefix = HUGEFILE_PREFIX_DEFAULT; + internal_config.hugepage_dir = NULL; +#ifdef RTE_LIBEAL_USE_HPET + internal_config.no_hpet = 0; +#else + internal_config.no_hpet = 1; +#endif + + while ((opt = getopt_long(argc, argvopt, "b:c:m:n:r:v", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* blacklist */ + case 'b': + if ((blacklist_index = eal_parse_blacklist_opt(optarg, + blacklist_index)) < 0) { + eal_usage(prgname); + return (-1); + } + break; + /* coremask */ + case 'c': + if (eal_parse_coremask(optarg) < 0) { + RTE_LOG(ERR, EAL, "invalid coremask\n"); + eal_usage(prgname); + return -1; + } + coremask_ok = 1; + break; + /* size of memory */ + case 'm': + internal_config.memory = atoi(optarg); + internal_config.memory *= 1024ULL; + internal_config.memory *= 1024ULL; + break; + /* force number of channels */ + case 'n': + internal_config.force_nchannel = atoi(optarg); + if (internal_config.force_nchannel == 0 || + internal_config.force_nchannel > 4) { + RTE_LOG(ERR, EAL, "invalid channel number\n"); + eal_usage(prgname); + return -1; + } + break; + /* force number of ranks */ + case 'r': + internal_config.force_nrank = atoi(optarg); + if (internal_config.force_nrank == 0 || + internal_config.force_nrank > 16) { + RTE_LOG(ERR, EAL, "invalid rank number\n"); + eal_usage(prgname); + return -1; + } + break; + case 'v': + /* since message is explicitly requested by user, we + * write message at highest log level so it can always be seen + * even if info or warning messages are disabled */ + RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version()); + break; + + /* long options */ + case 0: + if (!strcmp(lgopts[option_index].name, OPT_NO_HUGE)) { + internal_config.no_hugetlbfs = 1; + } + else if (!strcmp(lgopts[option_index].name, OPT_NO_PCI)) { + internal_config.no_pci = 1; + } + else if (!strcmp(lgopts[option_index].name, OPT_NO_HPET)) { + internal_config.no_hpet = 1; + } + else if (!strcmp(lgopts[option_index].name, OPT_NO_SHCONF)) { + internal_config.no_shconf = 1; + } + else if (!strcmp(lgopts[option_index].name, OPT_HUGE_DIR)) { + internal_config.hugepage_dir = optarg; + } + else if (!strcmp(lgopts[option_index].name, OPT_PROC_TYPE)) { + internal_config.process_type = eal_parse_proc_type(optarg); + } + else if (!strcmp(lgopts[option_index].name, OPT_FILE_PREFIX)) { + internal_config.hugefile_prefix = optarg; + } + break; + + default: + eal_usage(prgname); + return -1; + } + } + + /* sanity checks */ + if (!coremask_ok) { + RTE_LOG(ERR, EAL, "coremask not specified\n"); + eal_usage(prgname); + return -1; + } + if (internal_config.process_type == RTE_PROC_AUTO){ + internal_config.process_type = eal_proc_type_detect(); + } + if (internal_config.process_type == RTE_PROC_INVALID){ + RTE_LOG(ERR, 
EAL, "Invalid process type specified\n"); + eal_usage(prgname); + return -1; + } + if (internal_config.process_type == RTE_PROC_PRIMARY && + internal_config.force_nchannel == 0) { + RTE_LOG(ERR, EAL, "Number of memory channels (-n) not specified\n"); + eal_usage(prgname); + return -1; + } + if (index(internal_config.hugefile_prefix,'%') != NULL){ + RTE_LOG(ERR, EAL, "Invalid char, '%%', in '"OPT_FILE_PREFIX"' option\n"); + eal_usage(prgname); + return -1; + } + + if (blacklist_index > 0) + rte_eal_pci_set_blacklist(eal_dev_blacklist, blacklist_index); + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + return ret; +} + +/* Launch threads, called at application init(). */ +int +rte_eal_init(int argc, char **argv) +{ + int i, fctret, ret; + pthread_t thread_id; + + thread_id = pthread_self(); + + if (rte_eal_log_early_init() < 0) + rte_panic("Cannot init early logs\n"); + + fctret = eal_parse_args(argc, argv); + if (fctret < 0) + exit(1); + + if (eal_hugepage_info_init() < 0) + rte_panic("Cannot get hugepage information\n"); + + if (internal_config.memory == 0) { + if (internal_config.no_hugetlbfs) + internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE; + else + internal_config.memory = eal_get_hugepage_mem_size(); + } + + rte_srand(rte_rdtsc()); + rte_config_init(); + + if (rte_eal_cpu_init() < 0) + rte_panic("Cannot detect lcores\n"); + + if (rte_eal_memory_init() < 0) + rte_panic("Cannot init memory\n"); + + if (rte_eal_memzone_init() < 0) + rte_panic("Cannot init memzone\n"); + + if (rte_eal_tailqs_init() < 0) + rte_panic("Cannot init tail queues for objects\n"); + + if (rte_eal_log_init() < 0) + rte_panic("Cannot init logs\n"); + + if (rte_eal_alarm_init() < 0) + rte_panic("Cannot init interrupt-handling thread\n"); + + if (rte_eal_intr_init() < 0) + rte_panic("Cannot init interrupt-handling thread\n"); + + if (rte_eal_hpet_init() < 0) + rte_panic("Cannot init HPET\n"); + + if (rte_eal_pci_init() < 0) + rte_panic("Cannot init PCI\n"); + + RTE_LOG(DEBUG, EAL, "Master core %u is ready (tid=%x)\n", + rte_config.master_lcore, (int)thread_id); + + RTE_LCORE_FOREACH_SLAVE(i) { + + /* + * create communication pipes between master thread + * and children + */ + if (pipe(lcore_config[i].pipe_master2slave) < 0) + rte_panic("Cannot create pipe\n"); + if (pipe(lcore_config[i].pipe_slave2master) < 0) + rte_panic("Cannot create pipe\n"); + + lcore_config[i].state = WAIT; + + /* create a thread for each lcore */ + ret = pthread_create(&lcore_config[i].thread_id, NULL, + eal_thread_loop, NULL); + if (ret != 0) + rte_panic("Cannot create thread\n"); + } + + eal_thread_init_master(rte_config.master_lcore); + + return fctret; +} + +/* get core role */ +enum rte_lcore_role_t +rte_eal_lcore_role(unsigned lcore_id) +{ + return (rte_config.lcore_role[lcore_id]); +} + +enum rte_proc_type_t +rte_eal_process_type(void) +{ + return (rte_config.process_type); +} + diff --git a/lib/librte_eal/linuxapp/eal/eal_alarm.c b/lib/librte_eal/linuxapp/eal/eal_alarm.c new file mode 100644 index 0000000000..f2eabf6a5b --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_alarm.c @@ -0,0 +1,232 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NS_PER_US 1000 +#define US_PER_MS 1000 +#define MS_PER_S 1000 +#define US_PER_S (US_PER_MS * MS_PER_S) + +struct alarm_entry { + LIST_ENTRY(alarm_entry) next; + struct timeval time; + rte_eal_alarm_callback cb_fn; + void *cb_arg; + volatile int executing; +}; + +static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER(); +static rte_spinlock_t alarm_list_lk = RTE_SPINLOCK_INITIALIZER; + +static struct rte_intr_handle intr_handle = {.fd = -1 }; +static int handler_registered = 0; +static void eal_alarm_callback(struct rte_intr_handle *hdl, void *arg); + +int +rte_eal_alarm_init(void) +{ + intr_handle.type = RTE_INTR_HANDLE_ALARM; + /* create a timerfd file descriptor */ + intr_handle.fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK); + if (intr_handle.fd == -1) + goto error; + + return 0; + +error: + rte_errno = errno; + return -1; +} + +static void +eal_alarm_callback(struct rte_intr_handle *hdl __rte_unused, + void *arg __rte_unused) +{ + struct timeval now; + struct alarm_entry *ap; + + rte_spinlock_lock(&alarm_list_lk); + while ((ap = LIST_FIRST(&alarm_list)) !=NULL && + gettimeofday(&now, NULL) == 0 && + (ap->time.tv_sec < now.tv_sec || (ap->time.tv_sec == now.tv_sec && + ap->time.tv_usec <= now.tv_usec))){ + ap->executing = 1; + rte_spinlock_unlock(&alarm_list_lk); + + ap->cb_fn(ap->cb_arg); + + rte_spinlock_lock(&alarm_list_lk); + LIST_REMOVE(ap, next); + rte_free(ap); + } + + if (!LIST_EMPTY(&alarm_list)) { + struct itimerspec atime = { .it_interval = { 0, 0 } }; + + ap = LIST_FIRST(&alarm_list); + atime.it_value.tv_sec = ap->time.tv_sec; + atime.it_value.tv_nsec = ap->time.tv_usec * NS_PER_US; + /* perform borrow for subtraction if necessary */ + if (now.tv_usec > ap->time.tv_usec) + 
atime.it_value.tv_sec--, atime.it_value.tv_nsec += US_PER_S * NS_PER_US; + + atime.it_value.tv_sec -= now.tv_sec; + atime.it_value.tv_nsec -= now.tv_usec * NS_PER_US; + timerfd_settime(intr_handle.fd, 0, &atime, NULL); + } + rte_spinlock_unlock(&alarm_list_lk); +} + +int +rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg) +{ + struct timeval now; + int ret = 0; + struct alarm_entry *ap, *new_alarm; + + /* Check parameters, including that us won't cause a uint64_t overflow */ + if (us < 1 || us > (UINT64_MAX - US_PER_S) || cb_fn == NULL) + return -EINVAL; + + new_alarm = rte_malloc(NULL, sizeof(*new_alarm), 0); + if (new_alarm == NULL) + return -ENOMEM; + + /* use current time to calculate absolute time of alarm */ + gettimeofday(&now, NULL); + + new_alarm->cb_fn = cb_fn; + new_alarm->cb_arg = cb_arg; + new_alarm->time.tv_usec = (now.tv_usec + us) % US_PER_S; + new_alarm->time.tv_sec = now.tv_sec + ((now.tv_usec + us) / US_PER_S); + new_alarm->executing = 0; + + rte_spinlock_lock(&alarm_list_lk); + if (!handler_registered) { + ret |= rte_intr_callback_register(&intr_handle, + eal_alarm_callback, NULL); + handler_registered = (ret == 0) ? 1 : 0; + } + + if (LIST_EMPTY(&alarm_list)) + LIST_INSERT_HEAD(&alarm_list, new_alarm, next); + else { + LIST_FOREACH(ap, &alarm_list, next) { + if (ap->time.tv_sec > new_alarm->time.tv_sec || + (ap->time.tv_sec == new_alarm->time.tv_sec && + ap->time.tv_usec > new_alarm->time.tv_usec)){ + LIST_INSERT_BEFORE(ap, new_alarm, next); + break; + } + if (LIST_NEXT(ap, next) == NULL) { + LIST_INSERT_AFTER(ap, new_alarm, next); + break; + } + } + } + + if (LIST_FIRST(&alarm_list) == new_alarm) { + struct itimerspec alarm_time = { + .it_interval = {0, 0}, + .it_value = { + .tv_sec = us / US_PER_S, + .tv_nsec = (us % US_PER_S) * NS_PER_US, + }, + }; + ret |= timerfd_settime(intr_handle.fd, 0, &alarm_time, NULL); + } + rte_spinlock_unlock(&alarm_list_lk); + + return ret; +} + +int +rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg) +{ + struct alarm_entry *ap, *ap_prev; + int count = 0; + + if (!cb_fn) + return -1; + + rte_spinlock_lock(&alarm_list_lk); + /* remove any matches at the start of the list */ + while ((ap = LIST_FIRST(&alarm_list)) != NULL && + cb_fn == ap->cb_fn && ap->executing == 0 && + (cb_arg == (void *)-1 || cb_arg == ap->cb_arg)) { + LIST_REMOVE(ap, next); + rte_free(ap); + count++; + } + ap_prev = ap; + + /* now go through list, removing entries not at start */ + LIST_FOREACH(ap, &alarm_list, next) { + /* this won't be true first time through */ + if (cb_fn == ap->cb_fn && ap->executing == 0 && + (cb_arg == (void *)-1 || cb_arg == ap->cb_arg)) { + LIST_REMOVE(ap,next); + rte_free(ap); + count++; + ap = ap_prev; + } + ap_prev = ap; + } + rte_spinlock_unlock(&alarm_list_lk); + return count; +} + diff --git a/lib/librte_eal/linuxapp/eal/eal_debug.c b/lib/librte_eal/linuxapp/eal/eal_debug.c new file mode 100644 index 0000000000..c05341dc15 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_debug.c @@ -0,0 +1,114 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
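The alarm API above is one-shot: an entry is removed from the list once its callback has run, so periodic work re-arms itself from inside the callback. A minimal sketch (the stats helper is hypothetical, and the public header name is assumed); note that rte_eal_alarm_cancel() with a cb_arg of (void *)-1 matches any argument:

#include <rte_alarm.h>                   /* assumed public header declaring this API */

#define STATS_PERIOD_US (1000 * 1000)    /* one second */

static void
stats_alarm_cb(void *arg)
{
	print_app_stats(arg);            /* hypothetical application helper */

	/* alarms are one-shot: re-register to get periodic behaviour */
	rte_eal_alarm_set(STATS_PERIOD_US, stats_alarm_cb, arg);
}

/* at start-up:  rte_eal_alarm_set(STATS_PERIOD_US, stats_alarm_cb, &stats);  */
/* at shutdown:  rte_eal_alarm_cancel(stats_alarm_cb, &stats);                */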
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define BACKTRACE_SIZE 256 + +/* dump the stack of the calling core */ +void rte_dump_stack(void) +{ + void *func[BACKTRACE_SIZE]; + char **symb = NULL; + int size; + + size = backtrace(func, BACKTRACE_SIZE); + symb = backtrace_symbols(func, size); + while (size > 0) { + rte_log(RTE_LOG_ERR, RTE_LOGTYPE_EAL, + "%d: [%s]\n", size, symb[size - 1]); + size --; + } +} + +/* not implemented in this environment */ +void rte_dump_registers(void) +{ + return; +} + +/* call abort(), it will generate a coredump if enabled */ +void __rte_panic(const char *funcname, const char *format, ...) +{ + va_list ap; + + /* disable history */ + rte_log_set_history(0); + + rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname); + va_start(ap, format); + rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap); + va_end(ap); + rte_dump_stack(); + rte_dump_registers(); + abort(); +} + +/* + * Like rte_panic this terminates the application. However, no traceback is + * provided and no core-dump is generated. + */ +void +rte_exit(int exit_code, const char *format, ...) +{ + va_list ap; + + /* disable history */ + rte_log_set_history(0); + + if (exit_code != 0) + RTE_LOG(CRIT, EAL, "Error - exiting with code: %d\nCause: ", exit_code); + + va_start(ap, format); + rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap); + va_end(ap); + +#ifndef RTE_EAL_ALWAYS_PANIC_ON_ERROR + exit(exit_code); +#else + rte_dump_stack(); + rte_dump_registers(); + abort(); +#endif +} diff --git a/lib/librte_eal/linuxapp/eal/eal_hpet.c b/lib/librte_eal/linuxapp/eal/eal_hpet.c new file mode 100644 index 0000000000..aa686b19be --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_hpet.c @@ -0,0 +1,232 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
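The two termination helpers above differ mainly in their post-mortem behaviour: rte_panic()/__rte_panic() logs the cause, dumps the stack and calls abort() (so a core dump is produced if enabled), while rte_exit() only logs and exits with the given code, unless RTE_EAL_ALWAYS_PANIC_ON_ERROR is defined. A hedged fragment showing the usual split (the checked variables are hypothetical):

	/* internal inconsistency: a backtrace and core dump are wanted */
	if (ctrl_ring == NULL)
		rte_panic("control ring disappeared\n");

	/* ordinary fatal configuration error: clean exit with a message */
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet ports available\n");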
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" +#include "eal_internal_cfg.h" + +#define DEV_HPET "/dev/hpet" + +/* Maximum number of counters. */ +#define HPET_TIMER_NUM 3 + +/* General capabilities register */ +#define CLK_PERIOD_SHIFT 32 /* Clock period shift. */ +#define CLK_PERIOD_MASK 0xffffffff00000000ULL /* Clock period mask. */ +#define COUNT_SIZE_CAP_SHIFT 13 /* Count size capa. shift. */ +#define COUNT_SIZE_CAP_MASK 0x0000000000002000ULL /* Count size capa. mask. */ + +/** + * HPET timer registers. From the Intel IA-PC HPET (High Precision Event + * Timers) Specification. + */ +struct eal_hpet_regs { + /* Memory-mapped, software visible registers */ + uint64_t capabilities; /**< RO General Capabilities Register. */ + uint64_t reserved0; /**< Reserved for future use. */ + uint64_t config; /**< RW General Configuration Register. */ + uint64_t reserved1; /**< Reserved for future use. */ + uint64_t isr; /**< RW Clear General Interrupt Status. */ + uint64_t reserved2[25]; /**< Reserved for future use. */ + union { + uint64_t counter; /**< RW Main Counter Value Register. */ + struct { + uint32_t counter_l; /**< RW Main Counter Low. */ + uint32_t counter_h; /**< RW Main Counter High. */ + }; + }; + uint64_t reserved3; /**< Reserved for future use. */ + struct { + uint64_t config; /**< RW Timer Config and Capability Reg. */ + uint64_t comp; /**< RW Timer Comparator Value Register. */ + uint64_t fsb; /**< RW FSB Interrupt Route Register. */ + uint64_t reserved4; /**< Reserved for future use. */ + } timers[HPET_TIMER_NUM]; /**< Set of HPET timers. */ +}; + +/* Mmap'd hpet registers */ +static volatile struct eal_hpet_regs *eal_hpet = NULL; + +/* Period at which the counter increments in femtoseconds (10^-15 seconds). 
*/ +static uint32_t eal_hpet_resolution_fs = 0; + +/* Frequency of the counter in Hz */ +static uint64_t eal_hpet_resolution_hz = 0; + +/* Incremented 4 times during one 32bits hpet full count */ +static uint32_t eal_hpet_msb; + +static pthread_t msb_inc_thread_id; + +/* + * This function runs on a specific thread to update a global variable + * containing used to process MSB of the HPET (unfortunatelly, we need + * this because hpet is 32 bits by default under linux). + */ +static __attribute__((noreturn)) void * +hpet_msb_inc(__attribute__((unused)) void *arg) +{ + uint32_t t; + + while (1) { + t = (eal_hpet->counter_l >> 30); + if (t != (eal_hpet_msb & 3)) + eal_hpet_msb ++; + sleep(10); + } +} + +static inline void +set_rdtsc_freq(void) +{ + uint64_t start; + + start = rte_rdtsc(); + sleep(1); + eal_hpet_resolution_hz = rte_rdtsc() - start; + eal_hpet_resolution_fs = (uint32_t) + ((1.0 / eal_hpet_resolution_hz) / 1e-15); +} + +/* + * Open and mmap /dev/hpet (high precision event timer) that will + * provide our time reference. + */ +int +rte_eal_hpet_init(void) +{ + int fd, ret; + + if (internal_config.no_hpet) { + goto use_rdtsc; + } + + fd = open(DEV_HPET, O_RDONLY); + if (fd < 0) { + RTE_LOG(WARNING, EAL, "WARNING: Cannot open "DEV_HPET": %s! " + "The TSC will be used instead.\n", + strerror(errno)); + goto use_rdtsc; + } + eal_hpet = mmap(NULL, 1024, PROT_READ, MAP_SHARED, fd, 0); + if (eal_hpet == MAP_FAILED) { + RTE_LOG(WARNING, EAL, "WARNING: Cannot mmap "DEV_HPET"! " + "The TSC will be used instead.\n"); + close(fd); + goto use_rdtsc; + } + close(fd); + + eal_hpet_resolution_fs = (uint32_t)((eal_hpet->capabilities & + CLK_PERIOD_MASK) >> + CLK_PERIOD_SHIFT); + + eal_hpet_resolution_hz = (1000ULL*1000ULL*1000ULL*1000ULL*1000ULL) / + (uint64_t)eal_hpet_resolution_fs; + + eal_hpet_msb = (eal_hpet->counter_l >> 30); + + /* create a thread that will increment a global variable for + * msb (hpet is 32 bits by default under linux) */ + ret = pthread_create(&msb_inc_thread_id, NULL, hpet_msb_inc, NULL); + if (ret < 0) { + RTE_LOG(WARNING, EAL, "WARNING: Cannot create HPET timer thread! " + "The TSC will be used instead.\n"); + goto use_rdtsc; + } + + return 0; + +use_rdtsc: + internal_config.no_hpet = 1; + set_rdtsc_freq(); + return 0; +} + +uint64_t +rte_get_hpet_hz(void) +{ + return eal_hpet_resolution_hz; +} + +uint64_t +rte_get_hpet_cycles(void) +{ + uint32_t t, msb; + uint64_t ret; + + if(internal_config.no_hpet) + /* fallback to rdtsc */ + return rte_rdtsc(); + + t = eal_hpet->counter_l; + msb = eal_hpet_msb; + ret = (msb + 2 - (t >> 30)) / 4; + ret <<= 32; + ret += t; + return ret; +} + +void +rte_delay_us(unsigned us) +{ + uint64_t start; + uint64_t ticks; + ticks = (uint64_t)us * 1000ULL * 1000ULL * 1000ULL; + ticks /= eal_hpet_resolution_fs; + start = rte_get_hpet_cycles(); + while ((rte_get_hpet_cycles() - start) < ticks) + rte_pause(); +} diff --git a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c new file mode 100644 index 0000000000..d1ed49ac13 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c @@ -0,0 +1,229 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
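rte_get_hpet_cycles() and rte_get_hpet_hz() behave the same whether the HPET was mapped or the TSC fallback is in use, so timing code can be written once against them. A small fragment (the workload call is hypothetical):

	uint64_t start, cycles;

	start = rte_get_hpet_cycles();
	process_burst();                               /* hypothetical workload */
	cycles = rte_get_hpet_cycles() - start;

	printf("burst took %.9f s (%llu timer cycles)\n",
	       (double)cycles / rte_get_hpet_hz(), (unsigned long long)cycles);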
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_memory.h" +#include "rte_memzone.h" +#include "rte_tailq.h" +#include "rte_eal.h" +#include "rte_launch.h" +#include "rte_per_lcore.h" +#include "rte_lcore.h" +#include "rte_debug.h" +#include "rte_log.h" +#include "rte_common.h" +#include "rte_string_fns.h" +#include "eal_internal_cfg.h" +#include "eal_hugepages.h" + +static const char sys_dir_path[] = "/sys/kernel/mm/hugepages"; + +static int32_t +get_num_hugepages(const char *subdir) +{ + const char nr_hp_file[] = "nr_hugepages"; + char path[BUFSIZ]; + unsigned num_pages = 0; + + rte_snprintf(path, sizeof(path), "%s/%s/%s", + sys_dir_path, subdir, nr_hp_file); + FILE *fd = fopen(path, "r"); + if (fd == NULL || fscanf(fd, "%u", &num_pages) != 1) + rte_panic("Error reading file '%s'\n", path); + fclose(fd); + + return num_pages; +} + +static uint64_t +get_default_hp_size(void) +{ + const char proc_meminfo[] = "/proc/meminfo"; + const char str_hugepagesz[] = "Hugepagesize:"; + unsigned hugepagesz_len = sizeof(str_hugepagesz) - 1; + char buffer[256]; + unsigned long long size = 0; + + FILE *fd = fopen(proc_meminfo, "r"); + if (fd == NULL) + rte_panic("Cannot open %s\n", proc_meminfo); + while(fgets(buffer, sizeof(buffer), fd)){ + if (strncmp(buffer, str_hugepagesz, hugepagesz_len) == 0){ + size = rte_str_to_size(&buffer[hugepagesz_len]); + break; + } + } + fclose(fd); + if (size == 0) + rte_panic("Cannot get default hugepage size from %s\n", proc_meminfo); + return size; +} + +static const char * +get_hugepage_dir(uint64_t hugepage_sz) +{ + enum proc_mount_fieldnames { + DEVICE = 0, + MOUNTPT, + FSTYPE, + OPTIONS, + _FIELDNAME_MAX + }; + static uint64_t default_size = 0; + const char proc_mounts[] = "/proc/mounts"; + const char hugetlbfs_str[] = "hugetlbfs"; + const size_t htlbfs_str_len = sizeof(hugetlbfs_str) - 1; + const char pagesize_opt[] = "pagesize="; + const size_t pagesize_opt_len = 
sizeof(pagesize_opt) - 1; + const char split_tok = ' '; + char *splitstr[_FIELDNAME_MAX]; + char buf[BUFSIZ]; + char *retval = NULL; + + FILE *fd = fopen(proc_mounts, "r"); + if (fd == NULL) + rte_panic("Cannot open %s\n", proc_mounts); + + if (default_size == 0) + default_size = get_default_hp_size(); + + while (fgets(buf, sizeof(buf), fd)){ + if (rte_strsplit(buf, sizeof(buf), splitstr, _FIELDNAME_MAX, + split_tok) != _FIELDNAME_MAX) { + RTE_LOG(ERR, EAL, "Error parsing %s\n", proc_mounts); + break; /* return NULL */ + } + + /* we have a specified --huge-dir option, only examine that dir */ + if (internal_config.hugepage_dir != NULL && + strcmp(splitstr[MOUNTPT], internal_config.hugepage_dir) != 0) + continue; + + if (strncmp(splitstr[FSTYPE], hugetlbfs_str, htlbfs_str_len) == 0){ + const char *pagesz_str = strstr(splitstr[OPTIONS], pagesize_opt); + + /* if no explicit page size, the default page size is compared */ + if (pagesz_str == NULL){ + if (hugepage_sz == default_size){ + retval = strdup(splitstr[MOUNTPT]); + break; + } + } + /* there is an explicit page size, so check it */ + else { + uint64_t pagesz = rte_str_to_size(&pagesz_str[pagesize_opt_len]); + if (pagesz == hugepage_sz) { + retval = strdup(splitstr[MOUNTPT]); + break; + } + } + } /* end if strncmp hugetlbfs */ + } /* end while fgets */ + + fclose(fd); + return retval; +} + +static inline void +swap_hpi(struct hugepage_info *a, struct hugepage_info *b) +{ + char buf[sizeof(*a)]; + memcpy(buf, a, sizeof(*a)); + memcpy(a, b, sizeof(*a)); + memcpy(b, buf, sizeof(*a)); +} + +int +eal_hugepage_info_init(void) +{ + const char dirent_start_text[] = "hugepages-"; + const size_t dirent_start_len = sizeof(dirent_start_text) - 1; + unsigned i, num_sizes = 0; + + DIR *dir = opendir(sys_dir_path); + if (dir == NULL) + rte_panic("Cannot open directory %s to read system hugepage info\n", + sys_dir_path); + + struct dirent *dirent = readdir(dir); + while(dirent != NULL){ + if (strncmp(dirent->d_name, dirent_start_text, dirent_start_len) == 0){ + struct hugepage_info *hpi = \ + &internal_config.hugepage_info[num_sizes]; + hpi->hugepage_sz = rte_str_to_size(&dirent->d_name[dirent_start_len]); + hpi->num_pages = get_num_hugepages(dirent->d_name); + hpi->hugedir = get_hugepage_dir(hpi->hugepage_sz); + if (hpi->hugedir == NULL){ + RTE_LOG(INFO, EAL, "%u hugepages of size %llu reserved, "\ + "but no mounted hugetlbfs found for that size\n", + hpi->num_pages, + (unsigned long long)hpi->hugepage_sz); + hpi->num_pages = 0; + } else + num_sizes++; + } + dirent = readdir(dir); + } + closedir(dir); + internal_config.num_hugepage_sizes = num_sizes; + + /* sort the page directory entries by size, largest to smallest */ + for (i = 0; i < num_sizes; i++){ + unsigned j; + for (j = i+1; j < num_sizes; j++) + if (internal_config.hugepage_info[j-1].hugepage_sz < \ + internal_config.hugepage_info[j].hugepage_sz) + swap_hpi(&internal_config.hugepage_info[j-1], + &internal_config.hugepage_info[j]); + } + + /* now we have all info, check we have at least one valid size */ + for (i = 0; i < num_sizes; i++) + if (internal_config.hugepage_info[i].hugedir != NULL && + internal_config.hugepage_info[i].num_pages > 0) + return 0; + /* no valid hugepage mounts available, return error */ + return -1; +} diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c new file mode 100644 index 0000000000..8ff2289ec9 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_interrupts.c @@ -0,0 +1,540 @@ +/*- + * BSD LICENSE + * + * 
Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" + +#define EAL_INTR_EPOLL_WAIT_FOREVER (-1) + +/** + * union for pipe fds. 
+ */ +union intr_pipefds{ + struct { + int pipefd[2]; + }; + struct { + int readfd; + int writefd; + }; +}; + +/** + * union buffer for reading on different devices + */ +union rte_intr_read_buffer { + int uio_intr_count; /* for uio device */ + uint64_t timerfd_num; /* for timerfd */ + char charbuf[16]; /* for others */ +}; + +TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback); +TAILQ_HEAD(rte_intr_source_list, rte_intr_source); + +struct rte_intr_callback { + TAILQ_ENTRY(rte_intr_callback) next; + rte_intr_callback_fn cb_fn; /**< callback address */ + void *cb_arg; /**< parameter for callback */ +}; + +struct rte_intr_source { + TAILQ_ENTRY(rte_intr_source) next; + struct rte_intr_handle intr_handle; /**< interrupt handle */ + struct rte_intr_cb_list callbacks; /**< user callbacks */ +}; + +/* global spinlock for interrupt data operation */ +static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER; + +/* union buffer for pipe read/write */ +static union intr_pipefds intr_pipe; + +/* interrupt sources list */ +static struct rte_intr_source_list intr_sources; + +/* interrupt handling thread */ +static pthread_t intr_thread; + +int +rte_intr_callback_register(struct rte_intr_handle *intr_handle, + rte_intr_callback_fn cb, void *cb_arg) +{ + int ret = -1; + struct rte_intr_source *src; + int wake_thread = 0; + + /* first do parameter checking */ + if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) { + RTE_LOG(ERR, EAL, + "Registering with invalid input parameter\n"); + return -EINVAL; + } + + /* allocate a new interrupt callback entity */ + struct rte_intr_callback *callback = + rte_zmalloc("interrupt callback list", + sizeof(*callback), 0); + if (callback == NULL) { + RTE_LOG(ERR, EAL, "Can not allocate memory\n"); + return -ENOMEM; + } + callback->cb_fn = cb; + callback->cb_arg = cb_arg; + + rte_spinlock_lock(&intr_lock); + + /* check if there is at least one callback registered for the fd */ + TAILQ_FOREACH(src, &intr_sources, next) + if (src->intr_handle.fd == intr_handle->fd) { + if (src->callbacks.tqh_first == NULL) + /* we had no interrupts for this */ + wake_thread = 1; + + TAILQ_INSERT_TAIL(&(src->callbacks), callback, next); + break; + } + + /* No callback registered for this fd */ + if (src == NULL){ + /* no existing callbacks for this - add new source */ + src = rte_zmalloc("interrupt source list", sizeof(*src), 0); + if (src == NULL){ + RTE_LOG(ERR, EAL, "Can not allocate memory\n"); + ret = -ENOMEM; + goto error; + } + src->intr_handle = *intr_handle; + TAILQ_INIT(&src->callbacks); + + TAILQ_INSERT_TAIL(&intr_sources, src, next); + TAILQ_INSERT_TAIL(&(src->callbacks), callback, next); + wake_thread = 1; + } + + rte_spinlock_unlock(&intr_lock); + /** + * check if need to notify the pipe fd waited by epoll_wait to + * rebuild the wait list. 
+ */ + if (wake_thread) + if (write(intr_pipe.writefd, "1", 1) < 0) + return -EPIPE; + + return 0; + +error: + rte_spinlock_unlock(&intr_lock); + + return ret; +} + +int +rte_intr_callback_unregister(struct rte_intr_handle *intr_handle, + rte_intr_callback_fn cb_fn, void *cb_arg) +{ + int ret = -1; + struct rte_intr_source *src; + struct rte_intr_callback *cb; + + /* do parameter checking first */ + if (intr_handle == NULL || intr_handle->fd < 0) { + RTE_LOG(ERR, EAL, + "Unregistering with invalid input parameter\n"); + return -EINVAL; + } + + rte_spinlock_lock(&intr_lock); + + /* check if the insterrupt source for the fd is existent */ + TAILQ_FOREACH(src, &intr_sources, next) + if (src->intr_handle.fd == intr_handle->fd) + break; + + /* No interrupt source registered for the fd */ + if (src == NULL) { + ret = -ENOENT; + goto error; + } + + ret = 0; + TAILQ_FOREACH(cb, &src->callbacks, next) { + if (cb->cb_fn != cb_fn) + continue; + if (cb_arg == (void *)-1 || cb->cb_arg == cb_arg) { + TAILQ_REMOVE(&src->callbacks, cb, next); + rte_free(cb); + ret ++; + } + + if (src->callbacks.tqh_first == NULL) { + TAILQ_REMOVE(&intr_sources, src, next); + rte_free(src); + } + } + + /* notify the pipe fd waited by epoll_wait to rebuild the wait list */ + if (write(intr_pipe.writefd, "1", 1) < 0) { + ret = -EPIPE; + goto error; + } + + rte_spinlock_unlock(&intr_lock); + + return ret; + +error: + rte_spinlock_unlock(&intr_lock); + + return ret; +} + +int +rte_intr_enable(struct rte_intr_handle *intr_handle) +{ + const int value = 1; + + if (!intr_handle || intr_handle->fd < 0) + return -1; + + switch (intr_handle->type){ + /* write to the uio fd to enable the interrupt */ + case RTE_INTR_HANDLE_UIO: + if (write(intr_handle->fd, &value, sizeof(value)) < 0) { + RTE_LOG(ERR, EAL, + "Error enabling interrupts for fd %d\n", + intr_handle->fd); + return -1; + } + break; + /* not used at this moment */ + case RTE_INTR_HANDLE_ALARM: + return -1; + /* unkown handle type */ + default: + RTE_LOG(ERR, EAL, + "Unknown handle type of fd %d\n", + intr_handle->fd); + return -1; + } + + return 0; +} + +int +rte_intr_disable(struct rte_intr_handle *intr_handle) +{ + const int value = 0; + + if (!intr_handle || intr_handle->fd < 0) + return -1; + + switch (intr_handle->type){ + /* write to the uio fd to disable the interrupt */ + case RTE_INTR_HANDLE_UIO: + if (write(intr_handle->fd, &value, sizeof(value)) < 0){ + RTE_LOG(ERR, EAL, + "Error enabling interrupts for fd %d\n", + intr_handle->fd); + return -1; + } + break; + /* not used at this moment */ + case RTE_INTR_HANDLE_ALARM: + return -1; + /* unkown handle type */ + default: + RTE_LOG(ERR, EAL, + "Unknown handle type of fd %d\n", + intr_handle->fd); + return -1; + } + + return 0; +} + +static int +eal_intr_process_interrupts(struct epoll_event *events, int nfds) +{ + int n, i, active_cb, bytes_read; + struct rte_intr_source *src; + struct rte_intr_callback *cb; + union rte_intr_read_buffer buf; + struct rte_intr_callback active_cbs[32]; + + for (n = 0; n < nfds; n++) { + /** + * if the pipe fd is ready to read, return out to + * rebuild the wait list. 
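A driver-side sketch of how the registration API above is typically used: hook a callback on a device's uio file descriptor, enable the interrupt, and re-enable it from the callback once the event has been serviced so the next one is delivered (the port structure and helper are hypothetical):

static void
link_intr_cb(struct rte_intr_handle *handle, void *arg)
{
	struct my_port *port = arg;        /* hypothetical driver state */

	handle_link_change(port);          /* hypothetical: read and clear the cause */

	/* rte_intr_enable() writes to the uio fd, re-arming the interrupt
	 * so the next event reaches the interrupt thread */
	rte_intr_enable(handle);
}

/* during device set-up, with intr_handle.type == RTE_INTR_HANDLE_UIO:       */
/*	rte_intr_callback_register(&port->intr_handle, link_intr_cb, port);  */
/*	rte_intr_enable(&port->intr_handle);                                  */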
+ */ + if (events[n].data.fd == intr_pipe.readfd){ + int r = read(intr_pipe.readfd, buf.charbuf, + sizeof(buf.charbuf)); + RTE_SET_USED(r); + return -1; + } + rte_spinlock_lock(&intr_lock); + TAILQ_FOREACH(src, &intr_sources, next) + if (src->intr_handle.fd == + events[n].data.fd) + break; + if (src == NULL){ + rte_spinlock_unlock(&intr_lock); + continue; + } + + /* for this source, make a copy of all the callbacks, + * then unlock the lock, so the callbacks can + * themselves manipulate the list for future + * instances. + */ + active_cb = 0; + memset(active_cbs, 0, sizeof(active_cbs)); + TAILQ_FOREACH(cb, &src->callbacks, next) + active_cbs[active_cb++] = *cb; + rte_spinlock_unlock(&intr_lock); + + /* set the length to be read dor different handle type */ + switch (src->intr_handle.type) { + case RTE_INTR_HANDLE_UIO: + bytes_read = 4; + break; + case RTE_INTR_HANDLE_ALARM: + bytes_read = sizeof(uint64_t); + break; + default: + bytes_read = 1; + break; + } + /** + * read out to clear the ready-to-be-read flag + * for epoll_wait. + */ + bytes_read = read(events[n].data.fd, &buf, bytes_read); + if (bytes_read < 0) { + RTE_LOG(ERR, EAL, "Error reading from file descriptor" + " %d, error: %d\n", events[n].data.fd, errno); + continue; + } + else if (bytes_read == 0) { + RTE_LOG(ERR, EAL, + "Read nothing from file descriptor %d.\n", + events[n].data.fd); + continue; + } + /** + * Finally, call all callbacks from the copy + * we made earlier. + */ + for (i = 0; i < active_cb; i++) { + if (active_cbs[i].cb_fn == NULL) + continue; + active_cbs[i].cb_fn(&src->intr_handle, + active_cbs[i].cb_arg); + } + } + + return 0; +} + +/** + * It handles all the interrupts. + * + * @param pfd + * epoll file descriptor. + * @param totalfds + * The number of file descriptors added in epoll. + * + * @return + * void + */ +static void +eal_intr_handle_interrupts(int pfd, unsigned totalfds) +{ + struct epoll_event events[totalfds]; + int nfds = 0; + + for(;;) { + nfds = epoll_wait(pfd, events, totalfds, + EAL_INTR_EPOLL_WAIT_FOREVER); + /* epoll_wait fail */ + if (nfds < 0) { + if (errno == EINTR) + continue; + RTE_LOG(ERR, EAL, + "epoll_wait returns with fail\n"); + return; + } + /* epoll_wait timeout, will never happens here */ + else if (nfds == 0) + continue; + /* epoll_wait has at least one fd ready to read */ + if (eal_intr_process_interrupts(events, nfds) < 0) + return; + } +} + +/** + * It builds/rebuilds up the epoll file descriptor with all the + * file descriptors being waited on. Then handles the interrupts. + * + * @param arg + * pointer. (unused) + * + * @return + * never return; + */ +static __attribute__((noreturn)) void * +eal_intr_thread_main(__rte_unused void *arg) +{ + struct epoll_event ev; + + /* host thread, never break out */ + for (;;) { + /* build up the epoll fd with all descriptors we are to + * wait on then pass it to the handle_interrupts function + */ + static struct epoll_event pipe_event = { + .events = EPOLLIN | EPOLLPRI, + }; + struct rte_intr_source *src; + unsigned numfds = 0; + + /* create epoll fd */ + int pfd = epoll_create(1); + if (pfd < 0) + rte_panic("Cannot create epoll instance\n"); + + pipe_event.data.fd = intr_pipe.readfd; + /** + * add pipe fd into wait list, this pipe is used to + * rebuild the wait list. 
+ */ + if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd, + &pipe_event) < 0) { + rte_panic("Error adding fd to %d epoll_ctl, %s\n", + intr_pipe.readfd, strerror(errno)); + } + numfds++; + + rte_spinlock_lock(&intr_lock); + + TAILQ_FOREACH(src, &intr_sources, next) { + if (src->callbacks.tqh_first == NULL) + continue; /* skip those with no callbacks */ + ev.events = EPOLLIN | EPOLLPRI; + ev.data.fd = src->intr_handle.fd; + + /** + * add all the uio device file descriptor + * into wait list. + */ + if (epoll_ctl(pfd, EPOLL_CTL_ADD, + src->intr_handle.fd, &ev) < 0){ + rte_panic("Error adding fd %d epoll_ctl, %s\n", + src->intr_handle.fd, strerror(errno)); + } + else + numfds++; + } + rte_spinlock_unlock(&intr_lock); + /* serve the interrupt */ + eal_intr_handle_interrupts(pfd, numfds); + + /** + * when we return, we need to rebuild the + * list of fds to monitor. + */ + close(pfd); + } +} + +int +rte_eal_intr_init(void) +{ + int ret = 0; + + /* init the global interrupt source head */ + TAILQ_INIT(&intr_sources); + + /** + * create a pipe which will be waited by epoll and notified to + * rebuild the wait list of epoll. + */ + if (pipe(intr_pipe.pipefd) < 0) + return -1; + + /* create the host thread to wait/handle the interrupt */ + ret = pthread_create(&intr_thread, NULL, + eal_intr_thread_main, NULL); + if (ret != 0) + RTE_LOG(ERR, EAL, + "Failed to create thread for interrupt handling\n"); + + return -ret; +} + diff --git a/lib/librte_eal/linuxapp/eal/eal_lcore.c b/lib/librte_eal/linuxapp/eal/eal_lcore.c new file mode 100644 index 0000000000..dde9bc1450 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_lcore.c @@ -0,0 +1,192 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" + +#define PROC_CPUINFO "/proc/cpuinfo" +#define PROC_PROCESSOR_FMT "" + +/* parse one line and try to match "processor : %d". */ +static int +parse_processor_id(const char *buf, unsigned *lcore_id) +{ + static const char _processor[] = "processor"; + const char *s; + + if (strncmp(buf, _processor, sizeof(_processor) - 1) != 0) + return -1; + + s = strchr(buf, ':'); + if (s == NULL) + return -1; + + errno = 0; + *lcore_id = strtoul(s+1, NULL, 10); + if (errno != 0) { + *lcore_id = -1; + return -1; + } + + return 0; +} + +/* parse one line and try to match "physical id : %d". */ +static int +parse_socket_id(const char *buf, unsigned *socket_id) +{ + static const char _physical_id[] = "physical id"; + const char *s; + + if (strncmp(buf, _physical_id, sizeof(_physical_id) - 1) != 0) + return -1; + + s = strchr(buf, ':'); + if (s == NULL) + return -1; + + errno = 0; + *socket_id = strtoul(s+1, NULL, 10); + if (errno != 0) + return -1; + + return 0; +} + +/* + * Parse /proc/cpuinfo to get the number of physical and logical + * processors on the machine. The function will fill the cpu_info + * structure. + */ +int +rte_eal_cpu_init(void) +{ + struct rte_config *config; + FILE *f; + char buf[BUFSIZ]; + unsigned lcore_id = 0; + unsigned socket_id = 0; + unsigned count = 0; + + /* get pointer to global configuration */ + config = rte_eal_get_configuration(); + + /* open /proc/cpuinfo */ + f = fopen(PROC_CPUINFO, "r"); + if (f == NULL) { + RTE_LOG(ERR, EAL, "%s(): Cannot find "PROC_CPUINFO"\n", __func__); + return -1; + } + + /* + * browse lines of /proc/cpuinfo and fill memseg entries in + * global configuration + */ + while (fgets(buf, sizeof(buf), f) != NULL) { + + if (parse_processor_id(buf, &lcore_id) == 0) + continue; + + if (parse_socket_id(buf, &socket_id) == 0) + continue; + + if (buf[0] == '\n') { + RTE_LOG(DEBUG, EAL, "Detected lcore %u on socket %u\n", + lcore_id, socket_id); + if (lcore_id >= RTE_MAX_LCORE) { + RTE_LOG(DEBUG, EAL, + "Skip lcore %u >= RTE_MAX_LCORE\n", + lcore_id); + continue; + } + + /* + * In a virtualization environment, the socket ID + * reported by the system may not be linked to a real + * physical socket ID, and may be incoherent. So in this + * case, a default socket ID of 0 is assigned. + */ + if (socket_id >= RTE_MAX_NUMA_NODES) { +#ifdef CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID + socket_id = 0; +#else + rte_panic("Socket ID (%u) is greater than " + "RTE_MAX_NUMA_NODES (%d)\n", + socket_id, RTE_MAX_NUMA_NODES); +#endif + } + + lcore_config[lcore_id].detected = 1; + lcore_config[lcore_id].socket_id = socket_id; + + } + } + + fclose(f); + + /* disable lcores that were not detected */ + RTE_LCORE_FOREACH(lcore_id) { + + if (lcore_config[lcore_id].detected == 0) { + RTE_LOG(DEBUG, EAL, "Skip lcore %u (not detected)\n", + lcore_id); + config->lcore_role[lcore_id] = ROLE_OFF; + } + else + count ++; + } + + config->lcore_count = count; + + return 0; +} diff --git a/lib/librte_eal/linuxapp/eal/eal_log.c b/lib/librte_eal/linuxapp/eal/eal_log.c new file mode 100644 index 0000000000..3d3d7ba8d9 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_log.c @@ -0,0 +1,137 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
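[Editor's note] parse_processor_id() and parse_socket_id() above key on the literal prefixes "processor" and "physical id" in /proc/cpuinfo and convert the number after the colon. A compact stand-alone version of the same idea, without the RTE types or the RTE_MAX_LCORE bounds checks, could look like this:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

/* If "line" starts with "key", return the unsigned value after the ':',
 * otherwise return -1.  Mirrors the parse_processor_id/parse_socket_id logic. */
static long value_for_key(const char *line, const char *key)
{
    const char *colon;

    if (strncmp(line, key, strlen(key)) != 0)
        return -1;
    colon = strchr(line, ':');
    if (colon == NULL)
        return -1;
    return strtol(colon + 1, NULL, 10);
}

int main(void)
{
    char buf[256];
    long cpu = -1, socket = 0;
    FILE *f = fopen("/proc/cpuinfo", "r");

    if (f == NULL)
        return 1;
    while (fgets(buf, sizeof(buf), f) != NULL) {
        long v;
        if ((v = value_for_key(buf, "processor")) >= 0)
            cpu = v;
        else if ((v = value_for_key(buf, "physical id")) >= 0)
            socket = v;
        else if (buf[0] == '\n' && cpu >= 0)  /* blank line ends one cpu block */
            printf("lcore %ld is on socket %ld\n", cpu, socket);
    }
    fclose(f);
    return 0;
}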
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" + +/* + * default log function, used once mempool (hence log history) is + * available + */ +static ssize_t +console_log_write(__attribute__((unused)) void *c, const char *buf, size_t size) +{ + char copybuf[BUFSIZ + 1]; + ssize_t ret; + uint32_t loglevel; + + /* add this log in history */ + rte_log_add_in_history(buf, size); + + /* write on stdout */ + ret = fwrite(buf, 1, size, stdout); + fflush(stdout); + + /* truncate message if too big (should not happen) */ + if (size > BUFSIZ) + size = BUFSIZ; + + /* Syslog error levels are from 0 to 7, so subtract 1 to convert */ + loglevel = rte_log_cur_msg_loglevel() - 1; + memcpy(copybuf, buf, size); + copybuf[size] = '\0'; + + /* write on syslog too */ + syslog(loglevel, "%s", copybuf); + + return ret; +} + +static ssize_t +console_log_read(__attribute__((unused)) void *c, + __attribute__((unused)) char *buf, + __attribute__((unused)) size_t size) +{ + return 0; +} + +static int +console_log_seek(__attribute__((unused)) void *c, + __attribute__((unused)) off64_t *offset, + __attribute__((unused)) int whence) +{ + return -1; +} + +static int +console_log_close(__attribute__((unused)) void *c) +{ + return 0; +} + +static cookie_io_functions_t console_log_func = { + .read = console_log_read, + .write = console_log_write, + .seek = console_log_seek, + .close = console_log_close +}; + +/* + * set the log to default function, called during eal init process, + * once memzones are available. 
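[Editor's note] The cookie functions above are wired into a FILE * by rte_eal_log_init() just below, so every message written to the EAL log stream is duplicated to stdout and syslog. A self-contained sketch of the underlying fopencookie() mechanism follows; it is glibc-specific, uses stderr instead of stdout, and the "sketch" syslog tag is only an example.

#define _GNU_SOURCE          /* fopencookie() is a glibc extension */
#include <stdio.h>
#include <syslog.h>

/* Write hook: send the buffer both to stderr and to syslog. */
static ssize_t
tee_write(void *cookie, const char *buf, size_t size)
{
    (void)cookie;
    fwrite(buf, 1, size, stderr);
    syslog(LOG_INFO, "%.*s", (int)size, buf);
    return (ssize_t)size;
}

int main(void)
{
    cookie_io_functions_t funcs = { .write = tee_write };
    FILE *log;

    openlog("sketch", LOG_NDELAY | LOG_PID, LOG_USER);
    log = fopencookie(NULL, "w", funcs);
    if (log == NULL)
        return 1;
    setvbuf(log, NULL, _IONBF, 0);     /* flush every fprintf immediately */

    fprintf(log, "hello from a cookie stream\n");
    fclose(log);
    closelog();
    return 0;
}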
+ */ +int +rte_eal_log_init(void) +{ + FILE *log_stream; + + log_stream = fopencookie(NULL, "w+", console_log_func); + if (log_stream == NULL) + return -1; + + openlog("rte", LOG_NDELAY | LOG_PID, LOG_DAEMON); + + if (rte_eal_common_log_init(log_stream) < 0) + return -1; + + return 0; +} + diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c new file mode 100644 index 0000000000..a47dab46f2 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_memory.c @@ -0,0 +1,796 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" +#include "eal_internal_cfg.h" +#include "eal_fs_paths.h" +#include "eal_hugepages.h" + +/** + * @file + * Huge page mapping under linux + * + * To reserve a big contiguous amount of memory, we use the hugepage + * feature of linux. For that, we need to have hugetlbfs mounted. This + * code will create many files in this directory (one per page) and + * map them in virtual memory. For each page, we will retrieve its + * physical address and remap it in order to have a virtual contiguous + * zone as well as a physical contiguous zone. + */ + + +#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space" + +/* + * Check whether address-space layout randomization is enabled in + * the kernel. 
This is important for multi-process as it can prevent + * two processes mapping data to the same virtual address + * Returns: + * 0 - address space randomization disabled + * 1/2 - address space randomization enabled + * negative error code on error + */ +static int +aslr_enabled(void) +{ + char c; + int retval, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY); + if (fd < 0) + return -errno; + retval = read(fd, &c, 1); + close(fd); + if (retval < 0) + return -errno; + if (retval == 0) + return -EIO; + switch (c) { + case '0' : return 0; + case '1' : return 1; + case '2' : return 2; + default: return -EINVAL; + } +} + +/* + * Try to mmap *size bytes in /dev/zero. If it is succesful, return the + * pointer to the mmap'd area and keep *size unmodified. Else, retry + * with a smaller zone: decrease *size by hugepage_sz until it reaches + * 0. In this case, return NULL. Note: this function returns an address + * which is a multiple of hugepage size. + */ +static void * +get_virtual_area(uint64_t *size, uint64_t hugepage_sz) +{ + void *addr; + int fd; + long aligned_addr; + + RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%"PRIx64" bytes\n", *size); + + fd = open("/dev/zero", O_RDONLY); + if (fd < 0){ + RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n"); + return NULL; + } + do { + addr = mmap(NULL, (*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0); + if (addr == MAP_FAILED) + *size -= hugepage_sz; + } while (addr == MAP_FAILED && *size > 0); + + if (addr == MAP_FAILED) { + close(fd); + RTE_LOG(INFO, EAL, "Cannot get a virtual area\n"); + return NULL; + } + + munmap(addr, (*size) + hugepage_sz); + close(fd); + + /* align addr to a huge page size boundary */ + aligned_addr = (long)addr; + aligned_addr += (hugepage_sz - 1); + aligned_addr &= (~(hugepage_sz - 1)); + addr = (void *)(aligned_addr); + + RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%"PRIx64")\n", + addr, *size); + + return addr; +} + +/* + * Mmap all hugepages of hugepage table: it first open a file in + * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the + * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored + * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to + * map continguous physical blocks in contiguous virtual blocks. + */ +static int +map_all_hugepages(struct hugepage *hugepg_tbl, + struct hugepage_info *hpi, int orig) +{ + int fd; + unsigned i; + void *virtaddr; + void *vma_addr = NULL; + uint64_t vma_len = 0; + + for (i = 0; i < hpi->num_pages; i++) { + uint64_t hugepage_sz = hpi->hugepage_sz; + + if (orig) { + hugepg_tbl[i].file_id = i; + hugepg_tbl[i].size = hugepage_sz; + eal_get_hugefile_path(hugepg_tbl[i].filepath, + sizeof(hugepg_tbl[i].filepath), hpi->hugedir, + hugepg_tbl[i].file_id); + hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0'; + } +#ifndef RTE_ARCH_X86_64 + /* for 32-bit systems, don't remap 1G pages, just reuse original + * map address as final map address. + */ + else if (hugepage_sz == RTE_PGSIZE_1G){ + hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va; + hugepg_tbl[i].orig_va = NULL; + continue; + } +#endif + else if (vma_len == 0) { + unsigned j, num_pages; + + /* reserve a virtual area for next contiguous + * physical block: count the number of + * contiguous physical pages. */ + for (j = i+1; j < hpi->num_pages ; j++) { + if (hugepg_tbl[j].physaddr != + hugepg_tbl[j-1].physaddr + hugepage_sz) + break; + } + num_pages = j - i; + vma_len = num_pages * hugepage_sz; + + /* get the biggest virtual memory area up to + * vma_len. 
If it fails, vma_addr is NULL, so + * let the kernel provide the address. */ + vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz); + if (vma_addr == NULL) + vma_len = hugepage_sz; + } + + fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0755); + if (fd < 0) { + RTE_LOG(ERR, EAL, "%s(): open failed: %s", __func__, + strerror(errno)); + return -1; + } + + virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, 0); + if (virtaddr == MAP_FAILED) { + RTE_LOG(ERR, EAL, "%s(): mmap failed: %s", __func__, + strerror(errno)); + close(fd); + return -1; + } + if (orig) { + hugepg_tbl[i].orig_va = virtaddr; + memset(virtaddr, 0, hugepage_sz); + } + else { + hugepg_tbl[i].final_va = virtaddr; + } + + vma_addr = (char *)vma_addr + hugepage_sz; + vma_len -= hugepage_sz; + close(fd); + } + return 0; +} + +/* Unmap all hugepages from original mapping. */ +static int +unmap_all_hugepages_orig(struct hugepage *hugepg_tbl, struct hugepage_info *hpi) +{ + unsigned i; + for (i = 0; i < hpi->num_pages; i++) { + if (hugepg_tbl[i].orig_va) { + munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz); + hugepg_tbl[i].orig_va = NULL; + } + } + return 0; +} + +/* + * For each hugepage in hugepg_tbl, fill the physaddr value. We find + * it by browsing the /proc/self/pagemap special file. + */ +static int +find_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi) +{ + int fd; + unsigned i; + uint64_t page; + unsigned long virt_pfn; + int page_size; + + /* standard page size */ + page_size = getpagesize(); + + fd = open("/proc/self/pagemap", O_RDONLY); + if (fd < 0) { + RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s", + __func__, strerror(errno)); + return -1; + } + + for (i = 0; i < hpi->num_pages; i++) { + off_t offset; + virt_pfn = (unsigned long)hugepg_tbl[i].orig_va / + page_size; + offset = sizeof(uint64_t) * virt_pfn; + if (lseek(fd, offset, SEEK_SET) != offset){ + RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s", + __func__, strerror(errno)); + close(fd); + return -1; + } + if (read(fd, &page, sizeof(uint64_t)) < 0) { + RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s", + __func__, strerror(errno)); + close(fd); + return -1; + } + + /* + * the pfn (page frame number) are bits 0-54 (see + * pagemap.txt in linux Documentation) + */ + hugepg_tbl[i].physaddr = ((page & 0x7fffffffffffffULL) * page_size); + } + close(fd); + return 0; +} + +/* + * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge + * page. 
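[Editor's note] find_physaddr() above relies on the /proc/self/pagemap layout: one 64-bit entry per virtual page, with the page frame number in bits 0-54 and the "present" flag in bit 63. A minimal translator for a single address, assuming the page is resident and the process has the needed privileges, might look like the sketch below (virt_to_phys is a hypothetical helper, not an EAL function).

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

/* Translate a virtual address of this process into a physical address,
 * or return 0 on failure. */
static uint64_t virt_to_phys(const void *virt)
{
    uint64_t entry;
    long pagesz = sysconf(_SC_PAGESIZE);
    uint64_t vpn = (uintptr_t)virt / pagesz;
    int fd = open("/proc/self/pagemap", O_RDONLY);

    if (fd < 0)
        return 0;
    if (pread(fd, &entry, sizeof(entry), vpn * sizeof(entry)) != sizeof(entry)) {
        close(fd);
        return 0;
    }
    close(fd);
    if ((entry & (1ULL << 63)) == 0)              /* page not present */
        return 0;
    return (entry & 0x7fffffffffffffULL) * pagesz +
           ((uintptr_t)virt % pagesz);
}

int main(void)
{
    int x = 42;
    printf("&x = %p -> phys 0x%llx\n", (void *)&x,
           (unsigned long long)virt_to_phys(&x));
    return 0;
}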
+ */ +static int +find_numasocket(struct hugepage *hugepg_tbl, struct hugepage_info *hpi) +{ + int socket_id; + char *end, *nodestr; + unsigned i, hp_count = 0; + uint64_t virt_addr; + char buf[BUFSIZ]; + char hugedir_str[PATH_MAX]; + FILE *f; + + f = fopen("/proc/self/numa_maps", "r"); + if (f == NULL) { + RTE_LOG(INFO, EAL, "cannot open /proc/self/numa_maps," + "consider that all memory is in socket_id 0"); + return 0; + } + + rte_snprintf(hugedir_str, sizeof(hugedir_str), + "%s/", hpi->hugedir); + + /* parse numa map */ + while (fgets(buf, sizeof(buf), f) != NULL) { + + /* ignore non huge page */ + if (strstr(buf, " huge ") == NULL && + strstr(buf, hugedir_str) == NULL) + continue; + + /* get zone addr */ + virt_addr = strtoull(buf, &end, 16); + if (virt_addr == 0 || end == buf) { + RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__); + goto error; + } + + /* get node id (socket id) */ + nodestr = strstr(buf, " N"); + if (nodestr == NULL) { + RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__); + goto error; + } + nodestr += 2; + end = strstr(nodestr, "="); + if (end == NULL) { + RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__); + goto error; + } + end[0] = '\0'; + end = NULL; + + socket_id = strtoul(nodestr, &end, 0); + if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) { + RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__); + goto error; + } + + /* if we find this page in our mappings, set socket_id */ + for (i = 0; i < hpi->num_pages; i++) { + void *va = (void *)(unsigned long)virt_addr; + if (hugepg_tbl[i].orig_va == va) { + hugepg_tbl[i].socket_id = socket_id; + hp_count++; + } + } + } + if (hp_count < hpi->num_pages) + goto error; + fclose(f); + return 0; + +error: + fclose(f); + return -1; +} + +/* + * Sort the hugepg_tbl by physical address (lower addresses first). We + * use a slow algorithm, but we won't have millions of pages, and this + * is only done at init time. 
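[Editor's note] find_numasocket() above pulls the node number out of the "N<node>=<pages>" token that /proc/self/numa_maps prints for every mapping, after matching the mapping's start address against the original hugepage mappings. The extraction itself reduces to a couple of string operations; the sketch below parses a hard-coded sample line rather than the live file.

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void)
{
    /* typical numa_maps line for a hugetlbfs mapping (sample data) */
    const char *line =
        "7f2c40000000 default file=/mnt/huge/rtemap_0 huge dirty=1 N1=1";
    unsigned long virt_addr;
    int node = -1;
    const char *p;

    /* mapping start address is the first hex field */
    virt_addr = strtoul(line, NULL, 16);

    /* the node is the number between " N" and "=" */
    p = strstr(line, " N");
    if (p != NULL)
        node = (int)strtol(p + 2, NULL, 10);

    printf("mapping at 0x%lx is on NUMA node %d\n", virt_addr, node);
    return 0;
}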
+ */ +static int +sort_by_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi) +{ + unsigned i, j; + int smallest_idx; + uint64_t smallest_addr; + struct hugepage tmp; + + for (i = 0; i < hpi->num_pages; i++) { + smallest_addr = 0; + smallest_idx = -1; + + /* + * browse all entries starting at 'i', and find the + * entry with the smallest addr + */ + for (j=i; jnum_pages; j++) { + + if (smallest_addr == 0 || + hugepg_tbl[j].physaddr < smallest_addr) { + smallest_addr = hugepg_tbl[j].physaddr; + smallest_idx = j; + } + } + + /* should not happen */ + if (smallest_idx == -1) { + RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__); + return -1; + } + + /* swap the 2 entries in the table */ + memcpy(&tmp, &hugepg_tbl[smallest_idx], sizeof(struct hugepage)); + memcpy(&hugepg_tbl[smallest_idx], &hugepg_tbl[i], + sizeof(struct hugepage)); + memcpy(&hugepg_tbl[i], &tmp, sizeof(struct hugepage)); + } + return 0; +} + +/* + * Uses mmap to create a shared memory area for storage of data + *Used in this file to store the hugepage file map on disk + */ +static void * +create_shared_memory(const char *filename, const size_t mem_size) +{ + void *retval; + int fd = open(filename, O_CREAT | O_RDWR, 0666); + if (fd < 0) + return NULL; + if (ftruncate(fd, mem_size) < 0) { + close(fd); + return NULL; + } + retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + close(fd); + return retval; +} + +/* + * This function takes in the list of hugepage sizes and the + * number of pages thereof, and calculates the best number of + * pages of each size to fulfill the request for ram + */ +static int +calc_num_pages(uint64_t memory, + struct hugepage_info *hp_info, + struct hugepage_info *hp_used, + unsigned num_hp_info) +{ + unsigned i = 0; + int total_num_pages = 0; + if (num_hp_info == 0) + return -1; + + for (i = 0; i < num_hp_info; i++){ + hp_used[i].hugepage_sz = hp_info[i].hugepage_sz; + hp_used[i].hugedir = hp_info[i].hugedir; + hp_used[i].num_pages = RTE_MIN(memory / hp_info[i].hugepage_sz, + hp_info[i].num_pages); + + memory -= hp_used[i].num_pages * hp_used[i].hugepage_sz; + total_num_pages += hp_used[i].num_pages; + + /* check if we have met all memory requests */ + if (memory == 0) + break; + /* check if we have any more pages left at this size, if so + * move on to next size */ + if (hp_used[i].num_pages == hp_info[i].num_pages) + continue; + /* At this point we know that there are more pages available that are + * bigger than the memory we want, so lets see if we can get enough + * from other page sizes. + */ + unsigned j; + uint64_t remaining_mem = 0; + for (j = i+1; j < num_hp_info; j++) + remaining_mem += hp_info[j].hugepage_sz * hp_info[j].num_pages; + + /* is there enough other memory, if not allocate another page and quit*/ + if (remaining_mem < memory){ + memory -= hp_info[i].hugepage_sz; + hp_used[i].num_pages++; + total_num_pages++; + break; /* we are done */ + } + } + return total_num_pages; +} + +/* + * Prepare physical memory mapping: fill configuration structure with + * these infos, return 0 on success. + * 1. map N huge pages in separate files in hugetlbfs + * 2. find associated physical addr + * 3. find associated NUMA socket ID + * 4. sort all huge pages by physical address + * 5. remap these N huge pages in the correct order + * 6. unmap the first mapping + * 7. 
fill memsegs in configuration with contiguous zones + */ +static int +rte_eal_hugepage_init(void) +{ + struct rte_mem_config *mcfg; + struct hugepage *hugepage; + struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES]; + int i, j, new_memseg; + int nrpages; + void *addr; + + memset(used_hp, 0, sizeof(used_hp)); + + /* get pointer to global configuration */ + mcfg = rte_eal_get_configuration()->mem_config; + + /* for debug purposes, hugetlbfs can be disabled */ + if (internal_config.no_hugetlbfs) { + addr = malloc(internal_config.memory); + mcfg->memseg[0].phys_addr = (unsigned long)addr; + mcfg->memseg[0].addr = addr; + mcfg->memseg[0].len = internal_config.memory; + mcfg->memseg[0].socket_id = 0; + return 0; + } + + nrpages = calc_num_pages(internal_config.memory, + &internal_config.hugepage_info[0], &used_hp[0], + internal_config.num_hugepage_sizes); + for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++) + RTE_LOG(INFO, EAL, "Requesting %u pages of size %"PRIu64"\n", + used_hp[i].num_pages, used_hp[i].hugepage_sz); + + hugepage = create_shared_memory(eal_hugepage_info_path(), + nrpages * sizeof(struct hugepage)); + if (hugepage == NULL) + return -1; + memset(hugepage, 0, nrpages * sizeof(struct hugepage)); + + unsigned hp_offset = 0; /* where we start the current page size entries */ + for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){ + struct hugepage_info *hpi = &used_hp[i]; + if (hpi->num_pages == 0) + continue; + + if (map_all_hugepages(&hugepage[hp_offset], hpi, 1) < 0){ + RTE_LOG(DEBUG, EAL, "Failed to mmap %u MB hugepages\n", + (unsigned)(hpi->hugepage_sz / 0x100000)); + goto fail; + } + + if (find_physaddr(&hugepage[hp_offset], hpi) < 0){ + RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n", + (unsigned)(hpi->hugepage_sz / 0x100000)); + goto fail; + } + + if (find_numasocket(&hugepage[hp_offset], hpi) < 0){ + RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n", + (unsigned)(hpi->hugepage_sz / 0x100000)); + goto fail; + } + + if (sort_by_physaddr(&hugepage[hp_offset], hpi) < 0) + goto fail; + + if (map_all_hugepages(&hugepage[hp_offset], hpi, 0) < 0){ + RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n", + (unsigned)(hpi->hugepage_sz / 0x100000)); + goto fail; + } + + if (unmap_all_hugepages_orig(&hugepage[hp_offset], hpi) < 0) + goto fail; + + /* we have processed a num of hugepages of this size, so inc offset */ + hp_offset += hpi->num_pages; + } + + memset(mcfg->memseg, 0, sizeof(mcfg->memseg)); + j = -1; + for (i = 0; i < nrpages; i++) { + new_memseg = 0; + + /* if this is a new section, create a new memseg */ + if (i == 0) + new_memseg = 1; + else if (hugepage[i].socket_id != hugepage[i-1].socket_id) + new_memseg = 1; + else if (hugepage[i].size != hugepage[i-1].size) + new_memseg = 1; + else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) != + hugepage[i].size) + new_memseg = 1; + else if (((unsigned long)hugepage[i].final_va - + (unsigned long)hugepage[i-1].final_va) != hugepage[i].size) + new_memseg = 1; + + if (new_memseg) { + j += 1; + if (j == RTE_MAX_MEMSEG) + break; + + mcfg->memseg[j].phys_addr = hugepage[i].physaddr; + mcfg->memseg[j].addr = hugepage[i].final_va; + mcfg->memseg[j].len = hugepage[i].size; + mcfg->memseg[j].socket_id = hugepage[i].socket_id; + mcfg->memseg[j].hugepage_sz = hugepage[i].size; + } + /* continuation of previous memseg */ + else { + mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz; + } + hugepage[i].memseg_id = j; + } + + return 0; + + + fail: + return -1; +} + +/* + * uses fstat to 
report the size of a file on disk + */ +static off_t +getFileSize(int fd) +{ + struct stat st; + if (fstat(fd, &st) < 0) + return 0; + return st.st_size; +} + +/* + * This creates the memory mappings in the secondary process to match that of + * the server process. It goes through each memory segment in the DPDK runtime + * configuration and finds the hugepages which form that segment, mapping them + * in order to form a contiguous block in the virtual memory space + */ +static int +rte_eal_hugepage_attach(void) +{ + const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config; + const struct hugepage *hp = NULL; + unsigned num_hp = 0; + unsigned i, s = 0; /* s used to track the segment number */ + off_t size; + int fd, fd_zero = -1, fd_hugepage = -1; + + if (aslr_enabled() > 0) { + RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization " + "(ASLR) is enabled in the kernel.\n"); + RTE_LOG(WARNING, EAL, " This may cause issues with mapping memory " + "into secondary processes\n"); + } + + fd_zero = open("/dev/zero", O_RDONLY); + if (fd_zero < 0) { + RTE_LOG(ERR, EAL, "Could not open /dev/zero\n"); + goto error; + } + fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY); + if (fd_hugepage < 0) { + RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path()); + goto error; + } + + size = getFileSize(fd_hugepage); + hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0); + if (hp == NULL) { + RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path()); + goto error; + } + + num_hp = size / sizeof(struct hugepage); + RTE_LOG(DEBUG, EAL, "Analysing %u hugepages\n", num_hp); + + while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){ + void *addr, *base_addr; + uintptr_t offset = 0; + + /* fdzero is mmapped to get a contiguous block of virtual addresses + * get a block of free memory of the appropriate size - + * use mmap to attempt to get an identical address as server. 
+ */ + base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len, + PROT_READ, MAP_PRIVATE, fd_zero, 0); + if (base_addr == MAP_FAILED || base_addr != mcfg->memseg[s].addr) { + RTE_LOG(ERR, EAL, "Could not mmap %llu bytes " + "in /dev/zero to requested address [%p]\n", + (unsigned long long)mcfg->memseg[s].len, + mcfg->memseg[s].addr); + if (aslr_enabled() > 0) + RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel " + "and retry running both primary and secondary processes\n"); + goto error; + } + /* free memory so we can map the hugepages into the space */ + munmap(base_addr, mcfg->memseg[s].len); + + /* find the hugepages for this segment and map them + * we don't need to worry about order, as the server sorted the + * entries before it did the second mmap of them */ + for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++){ + if (hp[i].memseg_id == (int)s){ + fd = open(hp[i].filepath, O_RDWR); + if (fd < 0) { + RTE_LOG(ERR, EAL, "Could not open %s\n", + hp[i].filepath); + goto error; + } + addr = mmap(RTE_PTR_ADD(base_addr, offset), + hp[i].size, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_FIXED, fd, 0); + close(fd); /* close file both on success and on failure */ + if (addr == MAP_FAILED) { + RTE_LOG(ERR, EAL, "Could not mmap %s\n", + hp[i].filepath); + goto error; + } + offset+=hp[i].size; + } + } + RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s, + (unsigned long long)mcfg->memseg[s].len); + s++; + } + close(fd_zero); + close(fd_hugepage); + return 0; + +error: + if (fd_zero >= 0) + close(fd_zero); + if (fd_hugepage >= 0) + close(fd_hugepage); + return -1; +} + +static int +rte_eal_memdevice_init(void) +{ + struct rte_config *config; + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return 0; + + config = rte_eal_get_configuration(); + config->mem_config->nchannel = internal_config.force_nchannel; + config->mem_config->nrank = internal_config.force_nrank; + + return 0; +} + + +/* init memory subsystem */ +int +rte_eal_memory_init(void) +{ + const int retval = rte_eal_process_type() == RTE_PROC_PRIMARY ? + rte_eal_hugepage_init() : + rte_eal_hugepage_attach(); + if (retval < 0) + return -1; + + if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0) + return -1; + + return 0; +} diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c new file mode 100644 index 0000000000..78687d67f2 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_pci.c @@ -0,0 +1,770 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
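[Editor's note] rte_eal_hugepage_attach() above re-reads the hugepage table that the primary process wrote through create_shared_memory(), by mapping the same file read-only and walking the records. The file-mapping half of that pattern is sketched below with a simplified, hypothetical record type; the path shown is only an example of what the default HUGEPAGE_INFO_FMT in this patch expands to for root.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

/* simplified stand-in for the per-hugepage record written by the primary */
struct page_rec {
    uint64_t physaddr;
    uint64_t size;
    int      socket_id;
};

int main(void)
{
    const char *path = "/var/run/.rte_hugepage_info";   /* example path */
    struct stat st;
    const struct page_rec *tbl;
    size_t i, n;
    int fd = open(path, O_RDONLY);

    if (fd < 0 || fstat(fd, &st) < 0)
        return 1;
    tbl = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    close(fd);                        /* the mapping stays valid after close */
    if (tbl == MAP_FAILED)
        return 1;

    n = st.st_size / sizeof(*tbl);
    for (i = 0; i < n; i++)
        printf("page %zu: phys 0x%llx size 0x%llx socket %d\n", i,
               (unsigned long long)tbl[i].physaddr,
               (unsigned long long)tbl[i].size, tbl[i].socket_id);

    munmap((void *)tbl, st.st_size);
    return 0;
}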
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_internal_cfg.h" +#include "eal_private.h" + +/** + * @file + * PCI probing under linux + * + * This code is used to simulate a PCI probe by parsing information in + * sysfs. Moreover, when a registered driver matches a device, the + * kernel driver currently using it is unloaded and replaced by + * igb_uio module, which is a very minimal userland driver for Intel + * network card, only providing access to PCI BAR to applications, and + * enabling bus master. + */ + + +#define PROC_MODULES "/proc/modules" + +#define IGB_UIO_NAME "igb_uio" + +#define UIO_NEWID "/sys/bus/pci/drivers/%s/new_id" +#define UIO_BIND "/sys/bus/pci/drivers/%s/bind" + +/* maximum time to wait that /dev/uioX appears */ +#define UIO_DEV_WAIT_TIMEOUT 3 /* seconds */ + +/* + * For multi-process we need to reproduce all PCI mappings in secondary + * processes, so save them in a tailq. + */ +struct uio_resource { + TAILQ_ENTRY(uio_resource) next; + + struct rte_pci_addr pci_addr; + void *addr; + char path[PATH_MAX]; + unsigned long size; + unsigned long offset; +}; + +TAILQ_HEAD(uio_res_list, uio_resource); + +static struct uio_res_list *uio_res_list = NULL; +static int pci_parse_sysfs_value(const char *filename, unsigned long *val); + +/* + * Check that a kernel module is loaded. Returns 0 on success, or if the + * parameter is NULL, or -1 if the module is not loaded. 
+ */ +static int +pci_uio_check_module(const char *module_name) +{ + FILE *f; + unsigned i; + char buf[BUFSIZ]; + + if (module_name == NULL) + return 0; + + f = fopen(PROC_MODULES, "r"); + if (f == NULL) { + RTE_LOG(ERR, EAL, "Cannot open "PROC_MODULES"\n"); + return -1; + } + + while(fgets(buf, sizeof(buf), f) != NULL) { + + for (i = 0; i < sizeof(buf) && buf[i] != '\0'; i++) { + if (isspace(buf[i])) + buf[i] = '\0'; + } + + if (strncmp(buf, module_name, sizeof(buf)) == 0) { + fclose(f); + return 0; + } + } + fclose(f); + RTE_LOG(ERR, EAL, "Cannot find %s in "PROC_MODULES"\n", module_name); + return -1; +} + +/* bind a PCI to the kernel module driver */ +static int +pci_uio_bind_device(struct rte_pci_device *dev, const char *module_name) +{ + FILE *f; + int n; + char buf[BUFSIZ]; + char uio_newid[PATH_MAX]; + char uio_bind[PATH_MAX]; + struct rte_pci_addr *loc = &dev->addr; + + RTE_LOG(DEBUG, EAL, "bind PCI device "PCI_PRI_FMT" to %s driver\n", + loc->domain, loc->bus, loc->devid, loc->function, module_name); + + n = rte_snprintf(uio_newid, sizeof(uio_newid), UIO_NEWID, module_name); + if ((n < 0) || (n >= (int)sizeof(uio_newid))) { + RTE_LOG(ERR, EAL, "Cannot rte_snprintf uio_newid name\n"); + return -1; + } + n = rte_snprintf(uio_bind, sizeof(uio_bind), UIO_BIND, module_name); + if ((n < 0) || (n >= (int)sizeof(uio_bind))) { + RTE_LOG(ERR, EAL, "Cannot rte_snprintf uio_bind name\n"); + return -1; + } + + n = rte_snprintf(buf, sizeof(buf), "%x %x\n", + dev->id.vendor_id, dev->id.device_id); + if ((n < 0) || (n >= (int)sizeof(buf))) { + RTE_LOG(ERR, EAL, "Cannot rte_snprintf vendor_id/device_id\n"); + return -1; + } + + f = fopen(uio_newid, "w"); + if (f == NULL) { + RTE_LOG(ERR, EAL, "Cannot open %s\n", uio_newid); + return -1; + } + if (fwrite(buf, n, 1, f) == 0) { + fclose(f); + return -1; + } + fclose(f); + + f = fopen(uio_bind, "w"); + if (f == NULL) { + RTE_LOG(ERR, EAL, "Cannot open %s\n", uio_bind); + return -1; + } + n = rte_snprintf(buf, sizeof(buf), PCI_PRI_FMT "\n", + loc->domain, loc->bus, loc->devid, loc->function); + if ((n < 0) || (n >= (int)sizeof(buf))) { + RTE_LOG(ERR, EAL, "Cannot rte_snprintf PCI infos\n"); + fclose(f); + return -1; + } + if (fwrite(buf, n, 1, f) == 0) { + fclose(f); + return -1; + } + + RTE_LOG(DEBUG, EAL, "Device bound\n"); + + fclose(f); + return 0; +} + +/* map a particular resource from a file */ +static void * +pci_map_resource(struct rte_pci_device *dev, void *requested_addr, const char *devname, + unsigned long offset, unsigned long size) +{ + unsigned n; + int fd; + void *mapaddr; + + /* + * open devname, and mmap it: it can take some time to + * appear, so we wait some time before returning an error + */ + for (n=0; n= 0) + break; + if (errno != ENOENT) + break; + usleep(100000); + } + if (fd < 0) { + RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", devname, strerror(errno)); + goto fail; + } + + /* Map the PCI memory resource of device */ + mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE, + MAP_SHARED, fd, offset); + if (mapaddr == MAP_FAILED || + (requested_addr != NULL && mapaddr != requested_addr)) { + RTE_LOG(ERR, EAL, "%s(): cannot mmap %s: %s\n", __func__, + devname, strerror(errno)); + close(fd); + goto fail; + } + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* save fd if in primary process */ + dev->intr_handle.fd = fd; + dev->intr_handle.type = RTE_INTR_HANDLE_UIO; + } else { + /* fd is not needed in slave process, close it */ + dev->intr_handle.fd = -1; + dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; + close(fd); + 
} + + RTE_LOG(DEBUG, EAL, "PCI memory mapped at %p\n", mapaddr); + + return mapaddr; + +fail: + dev->intr_handle.fd = -1; + dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; + + return NULL; +} +/* map the PCI resource of a PCI device in virtual memory */ +static int +pci_uio_map_resource(struct rte_pci_device *dev) +{ + struct dirent *e; + DIR *dir; + char dirname[PATH_MAX]; + char dirname2[PATH_MAX]; + char filename[PATH_MAX]; + char devname[PATH_MAX]; /* contains the /dev/uioX */ + void *mapaddr; + unsigned uio_num; + unsigned long size, offset; + struct rte_pci_addr *loc = &dev->addr; + struct uio_resource *uio_res; + + RTE_LOG(DEBUG, EAL, "map PCI resource for device "PCI_PRI_FMT"\n", + loc->domain, loc->bus, loc->devid, loc->function); + + /* secondary processes - use already recorded details */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + + TAILQ_FOREACH(uio_res, uio_res_list, next) { + /* skip this element if it doesn't match our PCI address */ + if (memcmp(&uio_res->pci_addr, &dev->addr, sizeof(dev->addr))) + continue; + + if (pci_map_resource(dev, uio_res->addr, uio_res->path, \ + uio_res->offset, uio_res->size) == uio_res->addr) + return 0; + else { + RTE_LOG(ERR, EAL, "Cannot mmap device resource\n"); + return -1; + } + } + RTE_LOG(ERR, EAL, "Cannot find resource for device\n"); + return -1; + } + + /* depending on kernel version, uio can be located in uio/uioX + * or uio:uioX */ + + rte_snprintf(dirname, sizeof(dirname), + "/sys/bus/pci/devices/" PCI_PRI_FMT "/uio", + loc->domain, loc->bus, loc->devid, loc->function); + + dir = opendir(dirname); + if (dir == NULL) { + /* retry with the parent directory */ + rte_snprintf(dirname, sizeof(dirname), + "/sys/bus/pci/devices/" PCI_PRI_FMT, + loc->domain, loc->bus, loc->devid, loc->function); + dir = opendir(dirname); + + if (dir == NULL) { + RTE_LOG(ERR, EAL, "Cannot opendir %s\n", dirname); + return -1; + } + } + + /* take the first file starting with "uio" */ + while ((e = readdir(dir)) != NULL) { + int shortprefix_len = sizeof("uio") - 1; /* format could be uio%d ...*/ + int longprefix_len = sizeof("uio:uio") - 1; /* ... 
or uio:uio%d */ + char *endptr; + + if (strncmp(e->d_name, "uio", 3) != 0) + continue; + + /* first try uio%d */ + errno = 0; + uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10); + if (errno == 0 && endptr != e->d_name) { + rte_snprintf(dirname2, sizeof(dirname2), + "%s/uio%u", dirname, uio_num); + break; + } + + /* then try uio:uio%d */ + errno = 0; + uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10); + if (errno == 0 && endptr != e->d_name) { + rte_snprintf(dirname2, sizeof(dirname2), + "%s/uio:uio%u", dirname, uio_num); + break; + } + } + closedir(dir); + + /* No uio resource found */ + if (e == NULL) + return 0; + + /* get mapping offset */ + rte_snprintf(filename, sizeof(filename), + "%s/maps/map0/offset", dirname2); + if (pci_parse_sysfs_value(filename, &offset) < 0) { + RTE_LOG(ERR, EAL, "%s(): cannot parse offset\n", + __func__); + return -1; + } + + /* get mapping size */ + rte_snprintf(filename, sizeof(filename), + "%s/maps/map0/size", dirname2); + if (pci_parse_sysfs_value(filename, &size) < 0) { + RTE_LOG(ERR, EAL, "%s(): cannot parse size\n", + __func__); + return -1; + } + + /* open and mmap /dev/uioX */ + rte_snprintf(devname, sizeof(devname), "/dev/uio%u", uio_num); + mapaddr = pci_map_resource(dev, NULL, devname, offset, size); + if (mapaddr == NULL) + return -1; + dev->mem_resource.addr = mapaddr; + + /* save the mapping details for secondary processes*/ + uio_res = rte_malloc("UIO_RES", sizeof(*uio_res), 0); + if (uio_res == NULL){ + RTE_LOG(ERR, EAL, "%s(): cannot store uio mmap details\n", __func__); + return -1; + } + uio_res->addr = mapaddr; + uio_res->offset = offset; + uio_res->size = size; + rte_snprintf(uio_res->path, sizeof(uio_res->path), "%s", devname); + memcpy(&uio_res->pci_addr, &dev->addr, sizeof(uio_res->pci_addr)); + + TAILQ_INSERT_TAIL(uio_res_list, uio_res, next); + + return 0; +} + +/* parse the "resource" sysfs file */ +#define IORESOURCE_MEM 0x00000200 + +static int +pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev) +{ + FILE *f; + char buf[BUFSIZ]; + union pci_resource_info { + struct { + char *phys_addr; + char *end_addr; + char *flags; + }; + char *ptrs[PCI_RESOURCE_FMT_NVAL]; + } res_info; + int i; + uint64_t phys_addr, end_addr, flags; + + f = fopen(filename, "r"); + if (f == NULL) { + RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n"); + return -1; + } + + for (i = 0; imem_resource.phys_addr = phys_addr; + dev->mem_resource.len = end_addr - phys_addr + 1; + dev->mem_resource.addr = NULL; /* not mapped for now */ + break; + } + } + fclose(f); + return 0; + +error: + fclose(f); + return -1; +} + +/* parse a sysfs file containing one integer value */ +static int +pci_parse_sysfs_value(const char *filename, unsigned long *val) +{ + FILE *f; + char buf[BUFSIZ]; + char *end = NULL; + + f = fopen(filename, "r"); + if (f == NULL) { + RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n", + __func__, filename); + return -1; + } + + if (fgets(buf, sizeof(buf), f) == NULL) { + RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n", + __func__, filename); + fclose(f); + return -1; + } + *val = strtoul(buf, &end, 0); + if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) { + RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n", + __func__, filename); + fclose(f); + return -1; + } + fclose(f); + return 0; +} + +/* Scan one pci sysfs entry, and fill the devices list from it. 
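[Editor's note] Each line of a device's sysfs "resource" file holds three hexadecimal numbers: start address, end address and flags. That is what pci_parse_sysfs_resource() above splits apart before keeping the first IORESOURCE_MEM entry. A stripped-down parser for one such line, using sample values and no RTE helpers, is:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define IORESOURCE_MEM 0x00000200

int main(void)
{
    /* one line of /sys/bus/pci/devices/<addr>/resource (sample values) */
    const char *line =
        "0x00000000fb000000 0x00000000fb07ffff 0x0000000000040200";
    uint64_t start, end, flags;

    if (sscanf(line, "%" SCNx64 " %" SCNx64 " %" SCNx64,
               &start, &end, &flags) != 3)
        return 1;

    if (flags & IORESOURCE_MEM)
        printf("memory BAR: phys 0x%" PRIx64 ", len 0x%" PRIx64 "\n",
               start, end - start + 1);
    else
        printf("not a memory resource\n");
    return 0;
}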
*/ +static int +pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus, + uint8_t devid, uint8_t function) +{ + char filename[PATH_MAX]; + unsigned long tmp; + struct rte_pci_device *dev; + + dev = malloc(sizeof(*dev)); + if (dev == NULL) { + return -1; + } + + memset(dev, 0, sizeof(*dev)); + dev->addr.domain = domain; + dev->addr.bus = bus; + dev->addr.devid = devid; + dev->addr.function = function; + + /* get vendor id */ + rte_snprintf(filename, sizeof(filename), "%s/vendor", dirname); + if (pci_parse_sysfs_value(filename, &tmp) < 0) { + free(dev); + return -1; + } + dev->id.vendor_id = (uint16_t)tmp; + + /* get device id */ + rte_snprintf(filename, sizeof(filename), "%s/device", dirname); + if (pci_parse_sysfs_value(filename, &tmp) < 0) { + free(dev); + return -1; + } + dev->id.device_id = (uint16_t)tmp; + + /* get subsystem_vendor id */ + rte_snprintf(filename, sizeof(filename), "%s/subsystem_vendor", + dirname); + if (pci_parse_sysfs_value(filename, &tmp) < 0) { + free(dev); + return -1; + } + dev->id.subsystem_vendor_id = (uint16_t)tmp; + + /* get subsystem_device id */ + rte_snprintf(filename, sizeof(filename), "%s/subsystem_device", + dirname); + if (pci_parse_sysfs_value(filename, &tmp) < 0) { + free(dev); + return -1; + } + dev->id.subsystem_device_id = (uint16_t)tmp; + + /* parse resources */ + rte_snprintf(filename, sizeof(filename), "%s/resource", dirname); + if (pci_parse_sysfs_resource(filename, dev) < 0) { + RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__); + free(dev); + return -1; + } + + /* device is valid, add in list */ + TAILQ_INSERT_TAIL(&device_list, dev, next); + + return 0; +} + +/* + * split up a pci address into its constituent parts. + */ +static int +parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain, + uint8_t *bus, uint8_t *devid, uint8_t *function) +{ + /* first split on ':' */ + union splitaddr { + struct { + char *domain; + char *bus; + char *devid; + char *function; + }; + char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */ + } splitaddr; + + char *buf_copy = strndup(buf, bufsize); + if (buf_copy == NULL) + return -1; + + if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':') + != PCI_FMT_NVAL - 1) + goto error; + /* final split is on '.' 
between devid and function */ + splitaddr.function = strchr(splitaddr.devid,'.'); + if (splitaddr.function == NULL) + goto error; + *splitaddr.function++ = '\0'; + + /* now convert to int values */ + errno = 0; + *domain = (uint8_t)strtoul(splitaddr.domain, NULL, 16); + *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16); + *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16); + *function = (uint8_t)strtoul(splitaddr.function, NULL, 10); + if (errno != 0) + goto error; + + free(buf_copy); /* free the copy made with strdup */ + return 0; +error: + free(buf_copy); + return -1; +} + +/* + * Scan the content of the PCI bus, and the devices in the devices + * list + */ +static int +pci_scan(void) +{ + struct dirent *e; + DIR *dir; + char dirname[PATH_MAX]; + uint16_t domain; + uint8_t bus, devid, function; + + dir = opendir(SYSFS_PCI_DEVICES); + if (dir == NULL) { + RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n", + __func__, strerror(errno)); + return -1; + } + + while ((e = readdir(dir)) != NULL) { + if (e->d_name[0] == '.') + continue; + + if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &domain, + &bus, &devid, &function) != 0) + continue; + + rte_snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_PCI_DEVICES, + e->d_name); + if (pci_scan_one(dirname, domain, bus, devid, function) < 0) + goto error; + } + closedir(dir); + return 0; + +error: + closedir(dir); + return -1; +} + +/* unbind kernel driver for this device */ +static int +pci_unbind_kernel_driver(struct rte_pci_device *dev) +{ + int n; + FILE *f; + char filename[PATH_MAX]; + char buf[BUFSIZ]; + struct rte_pci_addr *loc = &dev->addr; + + /* open /sys/bus/pci/devices/AAAA:BB:CC.D/driver */ + rte_snprintf(filename, sizeof(filename), + SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/driver/unbind", + loc->domain, loc->bus, loc->devid, loc->function); + + RTE_LOG(DEBUG, EAL, "unbind kernel driver %s\n", filename); + + f = fopen(filename, "w"); + if (f == NULL) /* device was not bound */ + return 0; + + n = rte_snprintf(buf, sizeof(buf), PCI_PRI_FMT "\n", + loc->domain, loc->bus, loc->devid, loc->function); + if ((n < 0) || (n >= (int)sizeof(buf))) { + RTE_LOG(ERR, EAL, "%s(): rte_snprintf failed\n", __func__); + goto error; + } + if (fwrite(buf, n, 1, f) == 0) + goto error; + + fclose(f); + return 0; + +error: + fclose(f); + return -1; +} + +/* + * If vendor/device ID match, call the devinit() function of the + * driver. 
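[Editor's note] parse_pci_addr_format() above splits sysfs entry names of the form domain:bus:devid.function, treating the first three fields as hex and the function as decimal. When error reporting does not matter, the same split fits in one sscanf() call; this sketch uses a sample address string.

#include <stdio.h>

int main(void)
{
    const char *name = "0000:02:00.1";        /* sample sysfs entry name */
    unsigned domain, bus, devid, function;

    /* domain, bus and devid are hex, the function number is decimal */
    if (sscanf(name, "%x:%x:%x.%u", &domain, &bus, &devid, &function) != 4) {
        fprintf(stderr, "not a PCI address: %s\n", name);
        return 1;
    }
    printf("domain %04x bus %02x devid %02x function %u\n",
           domain, bus, devid, function);
    return 0;
}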
+ */ +int +rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *dev) +{ + struct rte_pci_id *id_table; + const char *module_name = NULL; + int ret; + + if (dr->drv_flags & RTE_PCI_DRV_NEED_IGB_UIO) + module_name = IGB_UIO_NAME; + + ret = pci_uio_check_module(module_name); + if (ret != 0) + rte_exit(1, "The %s module is required by the %s driver\n", + module_name, dr->name); + + for (id_table = dr->id_table ; id_table->vendor_id != 0; id_table++) { + + /* check if device's identifiers match the driver's ones */ + if (id_table->vendor_id != dev->id.vendor_id && + id_table->vendor_id != PCI_ANY_ID) + continue; + if (id_table->device_id != dev->id.device_id && + id_table->device_id != PCI_ANY_ID) + continue; + if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id && + id_table->subsystem_vendor_id != PCI_ANY_ID) + continue; + if (id_table->subsystem_device_id != dev->id.subsystem_device_id && + id_table->subsystem_device_id != PCI_ANY_ID) + continue; + + RTE_LOG(DEBUG, EAL, "probe driver: %x:%x %s\n", + dev->id.vendor_id, dev->id.device_id, dr->name); + + /* Unbind PCI devices if needed */ + if (module_name != NULL) { + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + /* unbind current driver, bind ours */ + if (pci_unbind_kernel_driver(dev) < 0) + return -1; + if (pci_uio_bind_device(dev, module_name) < 0) + return -1; + } + /* map the NIC resources */ + if (pci_uio_map_resource(dev) < 0) + return -1; + } + /* call the driver devinit() function */ + return dr->devinit(dr, dev); + + } + return -1; +} + +/* Init the PCI EAL subsystem */ +int +rte_eal_pci_init(void) +{ + TAILQ_INIT(&driver_list); + TAILQ_INIT(&device_list); + uio_res_list = RTE_TAILQ_RESERVE("PCI_RESOURCE_LIST", uio_res_list); + + /* for debug purposes, PCI can be disabled */ + if (internal_config.no_pci) + return 0; + + if (pci_scan() < 0) { + RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__); + return -1; + } + return 0; +} diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c new file mode 100644 index 0000000000..7409d28436 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/eal_thread.c @@ -0,0 +1,237 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
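[Editor's note] The probe loop above accepts a device when every identifier either equals the table entry or the entry holds the PCI_ANY_ID wildcard. Reduced to the two most common fields, and with ANY_ID standing in for PCI_ANY_ID, the test reads:

#include <stdio.h>
#include <stdint.h>

#define ANY_ID 0xffff   /* stand-in for PCI_ANY_ID */

struct id_entry {
    uint16_t vendor_id;
    uint16_t device_id;
};

/* return 1 if the device ids are covered by one entry of the table */
static int id_table_match(const struct id_entry *tbl, int n,
                          uint16_t vendor, uint16_t device)
{
    int i;
    for (i = 0; i < n; i++) {
        if (tbl[i].vendor_id != vendor && tbl[i].vendor_id != ANY_ID)
            continue;
        if (tbl[i].device_id != device && tbl[i].device_id != ANY_ID)
            continue;
        return 1;
    }
    return 0;
}

int main(void)
{
    static const struct id_entry tbl[] = {
        { 0x8086, 0x10fb },        /* one specific device */
        { 0x8086, ANY_ID },        /* or any device of that vendor */
    };
    printf("%d\n", id_table_match(tbl, 2, 0x8086, 0x1521));  /* prints 1 */
    return 0;
}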
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "eal_private.h" +#include "eal_thread.h" + +RTE_DEFINE_PER_LCORE(unsigned, _lcore_id); + +/* + * Send a message to a slave lcore identified by slave_id to call a + * function f with argument arg. Once the execution is done, the + * remote lcore switch in FINISHED state. + */ +int +rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id) +{ + int n; + char c = 0; + int m2s = lcore_config[slave_id].pipe_master2slave[1]; + int s2m = lcore_config[slave_id].pipe_slave2master[0]; + + if (lcore_config[slave_id].state != WAIT) + return -EBUSY; + + lcore_config[slave_id].f = f; + lcore_config[slave_id].arg = arg; + + /* send message */ + n = 0; + while (n == 0 || (n < 0 && errno == EINTR)) + n = write(m2s, &c, 1); + if (n < 0) + rte_panic("cannot write on configuration pipe\n"); + + /* wait ack */ + n = 0; + do { + n = read(s2m, &c, 1); + } while (n < 0 && errno == EINTR); + + if (n <= 0) + rte_panic("cannot read on configuration pipe\n"); + + return 0; +} + +/* set affinity for current thread */ +static int +eal_thread_set_affinity(void) +{ + int s; + pthread_t thread; + +/* + * According to the section VERSIONS of the CPU_ALLOC man page: + * + * The CPU_ZERO(), CPU_SET(), CPU_CLR(), and CPU_ISSET() macros were added + * in glibc 2.3.3. + * + * CPU_COUNT() first appeared in glibc 2.6. + * + * CPU_AND(), CPU_OR(), CPU_XOR(), CPU_EQUAL(), CPU_ALLOC(), + * CPU_ALLOC_SIZE(), CPU_FREE(), CPU_ZERO_S(), CPU_SET_S(), CPU_CLR_S(), + * CPU_ISSET_S(), CPU_AND_S(), CPU_OR_S(), CPU_XOR_S(), and CPU_EQUAL_S() + * first appeared in glibc 2.7. 
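[Editor's note] rte_eal_remote_launch() above is half of a two-pipe handshake: the master writes one command byte, then blocks reading the acknowledgement, retrying both system calls when a signal interrupts them. The retry idiom on its own, using plain pipe fds instead of the lcore configuration, looks like this:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* write exactly one byte, retrying if a signal interrupts the call */
static int send_cmd(int fd, char c)
{
    ssize_t n = 0;
    while (n == 0 || (n < 0 && errno == EINTR))
        n = write(fd, &c, 1);
    return n == 1 ? 0 : -1;
}

/* read exactly one byte, retrying on EINTR */
static int recv_ack(int fd, char *c)
{
    ssize_t n;
    do {
        n = read(fd, c, 1);
    } while (n < 0 && errno == EINTR);
    return n == 1 ? 0 : -1;
}

int main(void)
{
    int p[2];
    char ack;

    if (pipe(p) < 0)
        return 1;
    send_cmd(p[1], 'x');          /* in the EAL this wakes the slave lcore */
    recv_ack(p[0], &ack);         /* here we simply read our own byte back */
    printf("got '%c'\n", ack);
    return 0;
}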
+ */ +#if defined(CPU_ALLOC) + size_t size; + cpu_set_t *cpusetp; + + cpusetp = CPU_ALLOC(RTE_MAX_LCORE); + if (cpusetp == NULL) { + RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n"); + return -1; + } + + size = CPU_ALLOC_SIZE(RTE_MAX_LCORE); + CPU_ZERO_S(size, cpusetp); + CPU_SET_S(rte_lcore_id(), size, cpusetp); + + thread = pthread_self(); + s = pthread_setaffinity_np(thread, size, cpusetp); + if (s != 0) { + RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n"); + CPU_FREE(cpusetp); + return -1; + } + + CPU_FREE(cpusetp); +#else /* CPU_ALLOC */ + cpu_set_t cpuset; + CPU_ZERO( &cpuset ); + CPU_SET( rte_lcore_id(), &cpuset ); + + thread = pthread_self(); + s = pthread_setaffinity_np(thread, sizeof( cpuset ), &cpuset); + if (s != 0) { + RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n"); + return -1; + } +#endif + return 0; +} + +void eal_thread_init_master(unsigned lcore_id) +{ + /* set the lcore ID in per-lcore memory area */ + RTE_PER_LCORE(_lcore_id) = lcore_id; + + /* set CPU affinity */ + if (eal_thread_set_affinity() < 0) + rte_panic("cannot set affinity\n"); +} + +/* main loop of threads */ +__attribute__((noreturn)) void * +eal_thread_loop(__attribute__((unused)) void *arg) +{ + char c; + int n, ret; + unsigned lcore_id; + pthread_t thread_id; + int m2s, s2m; + + thread_id = pthread_self(); + + /* retrieve our lcore_id from the configuration structure */ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (thread_id == lcore_config[lcore_id].thread_id) + break; + } + if (lcore_id == RTE_MAX_LCORE) + rte_panic("cannot retrieve lcore id\n"); + + RTE_LOG(DEBUG, EAL, "Core %u is ready (tid=%x)\n", + lcore_id, (int)thread_id); + + m2s = lcore_config[lcore_id].pipe_master2slave[0]; + s2m = lcore_config[lcore_id].pipe_slave2master[1]; + + /* set the lcore ID in per-lcore memory area */ + RTE_PER_LCORE(_lcore_id) = lcore_id; + + /* set CPU affinity */ + if (eal_thread_set_affinity() < 0) + rte_panic("cannot set affinity\n"); + + /* read on our pipe to get commands */ + while (1) { + void *fct_arg; + + /* wait command */ + n = 0; + do { + n = read(m2s, &c, 1); + } while (n < 0 && errno == EINTR); + + if (n <= 0) + rte_panic("cannot read on configuration pipe\n"); + + lcore_config[lcore_id].state = RUNNING; + + /* send ack */ + n = 0; + while (n == 0 || (n < 0 && errno == EINTR)) + n = write(s2m, &c, 1); + if (n < 0) + rte_panic("cannot write on configuration pipe\n"); + + if (lcore_config[lcore_id].f == NULL) + rte_panic("NULL function pointer\n"); + + /* call the function and store the return value */ + fct_arg = lcore_config[lcore_id].arg; + ret = lcore_config[lcore_id].f(fct_arg); + lcore_config[lcore_id].ret = ret; + rte_wmb(); + lcore_config[lcore_id].state = FINISHED; + } + + /* never reached */ + /* pthread_exit(NULL); */ + /* return NULL; */ +} diff --git a/lib/librte_eal/linuxapp/eal/include/eal_fs_paths.h b/lib/librte_eal/linuxapp/eal/include/eal_fs_paths.h new file mode 100644 index 0000000000..9c5ffb27bf --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/include/eal_fs_paths.h @@ -0,0 +1,96 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/** + * @file + * Paths used for storing hugepage and config info for multi-process support. + */ + +#ifndef _EAL_LINUXAPP_FS_PATHS_H +#define _EAL_LINUXAPP_FS_PATHS_H + +/** Path of rte config file. */ +#define RUNTIME_CONFIG_FMT "%s/.%s_config" + +static const char *default_config_dir = "/var/run"; + +static inline const char * +eal_runtime_config_path(void) +{ + static char buffer[PATH_MAX]; /* static so auto-zeroed */ + const char *directory = default_config_dir; + const char *home_dir = getenv("HOME"); + + if (getuid() != 0 && home_dir != NULL) + directory = home_dir; + rte_snprintf(buffer, sizeof(buffer) - 1, RUNTIME_CONFIG_FMT, directory, + internal_config.hugefile_prefix); + return buffer; +} + +/** Path of hugepage info file. */ +#define HUGEPAGE_INFO_FMT "%s/.%s_hugepage_info" + +static inline const char * +eal_hugepage_info_path(void) +{ + static char buffer[PATH_MAX]; /* static so auto-zeroed */ + const char *directory = default_config_dir; + const char *home_dir = getenv("HOME"); + + if (getuid() != 0 && home_dir != NULL) + directory = home_dir; + rte_snprintf(buffer, sizeof(buffer) - 1, HUGEPAGE_INFO_FMT, directory, + internal_config.hugefile_prefix); + return buffer; +} + +/** String format for hugepage map files. */ +#define HUGEFILE_FMT "%s/%smap_%d" + +static inline const char * +eal_get_hugefile_path(char *buffer, size_t buflen, const char *hugedir, int f_id) +{ + rte_snprintf(buffer, buflen, HUGEFILE_FMT, hugedir, + internal_config.hugefile_prefix, f_id); + buffer[buflen - 1] = '\0'; + return buffer; +} + +/** define the default filename prefix for the %s values above */ +#define HUGEFILE_PREFIX_DEFAULT "rte" + + +#endif /* _EAL_LINUXAPP_FS_PATHS_H */ diff --git a/lib/librte_eal/linuxapp/eal/include/eal_hugepages.h b/lib/librte_eal/linuxapp/eal/include/eal_hugepages.h new file mode 100644 index 0000000000..2c7d646c81 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/include/eal_hugepages.h @@ -0,0 +1,62 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef RTE_LINUXAPP_HUGEPAGES_H_ +#define RTE_LINUXAPP_HUGEPAGES_H_ + +#define MAX_HUGEPAGE_PATH PATH_MAX + +/** + * Structure used to store informations about hugepages that we mapped + * through the files in hugetlbfs. + */ +struct hugepage { + void *orig_va; /**< virtual addr of first mmap() */ + void *final_va; /**< virtual addr of 2nd mmap() */ + uint64_t physaddr; /**< physical addr */ + uint64_t size; /**< the page size */ + int socket_id; /**< NUMA socket ID */ + int file_id; /**< the '%d' in HUGEFILE_FMT */ + int memseg_id; /**< the memory segment to which page belongs */ + char filepath[MAX_HUGEPAGE_PATH]; /**< Path to backing file on filesystem */ +}; + +/** + * Read the information from linux on what hugepages are available + * for the EAL to use + */ +int eal_hugepage_info_init(void); + +#endif /* EAL_HUGEPAGES_H_ */ diff --git a/lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h b/lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h new file mode 100644 index 0000000000..70d5afb273 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h @@ -0,0 +1,76 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
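/*
 * Illustrative sketch (editor's addition): how one struct hugepage entry
 * (defined in eal_hugepages.h above) could be filled for page 'i' of a given
 * size, combined with eal_get_hugefile_path() from eal_fs_paths.h. The
 * struct hugepage_info argument is the per-size descriptor declared in
 * eal_internal_cfg.h below; the remaining fields are set by the later
 * mmap()/remap passes of the EAL memory init, which are not shown here.
 */
static void
example_fill_hugepage(struct hugepage *hp, const struct hugepage_info *hpi, int i)
{
	hp->file_id = i;                 /* becomes the '%d' of HUGEFILE_FMT */
	hp->size = hpi->hugepage_sz;
	eal_get_hugefile_path(hp->filepath, sizeof(hp->filepath),
			hpi->hugedir, hp->file_id);
	/* orig_va, final_va, physaddr, socket_id, memseg_id: later passes */
}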
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/** + * @file + * Holds the structures for the eal internal configuration + */ + +#ifndef _EAL_LINUXAPP_INTERNAL_CFG +#define _EAL_LINUXAPP_INTERNAL_CFG + +#define MAX_HUGEPAGE_SIZES 3 /**< support up to 3 page sizes */ + +/* + * internal configuration structure for the number, size and + * mount points of hugepages + */ +struct hugepage_info { + uint64_t hugepage_sz; /**< size of a huge page */ + const char *hugedir; /**< dir where hugetlbfs is mounted */ + uint32_t num_pages; /**< number of hugepages of that size */ +}; + +/** + * internal configuration + */ +struct internal_config { + volatile uint64_t memory; /* amount of asked memory */ + volatile unsigned force_nchannel; /* force number of channels */ + volatile unsigned force_nrank; /* force number of ranks */ + volatile unsigned no_hugetlbfs; /* true to disable hugetlbfs */ + volatile unsigned no_pci; /* true to disable PCI */ + volatile unsigned no_hpet; /* true to disable HPET */ + volatile unsigned no_shconf; /* true if there is no shared config */ + volatile enum rte_proc_type_t process_type; /* multi-process proc type */ + const char *hugefile_prefix; /* the base filename of hugetlbfs files */ + const char *hugepage_dir; /* specific hugetlbfs directory to use */ + + unsigned num_hugepage_sizes; /* how many sizes on this system */ + struct hugepage_info hugepage_info[MAX_HUGEPAGE_SIZES]; +}; +extern struct internal_config internal_config; /**< Global EAL configuration. */ + +#endif diff --git a/lib/librte_eal/linuxapp/eal/include/eal_thread.h b/lib/librte_eal/linuxapp/eal/include/eal_thread.h new file mode 100644 index 0000000000..a04a35ebe6 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/include/eal_thread.h @@ -0,0 +1,55 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
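/*
 * Illustrative sketch (editor's addition): a snapshot of how the fields of
 * struct internal_config above relate to each other. All concrete values
 * here are hypothetical; they are normally filled by the EAL command-line
 * parsing and by eal_hugepage_info_init().
 */
static void
example_internal_config(void)
{
	internal_config.memory = 64ULL * 1024 * 1024;  /* 64 MB requested, in bytes */
	internal_config.no_hugetlbfs = 0;              /* hugepages enabled */
	internal_config.hugefile_prefix = "rte";       /* see HUGEFILE_PREFIX_DEFAULT */
	internal_config.hugepage_dir = NULL;           /* no specific directory forced */
	internal_config.process_type = RTE_PROC_PRIMARY;
	internal_config.num_hugepage_sizes = 1;
	internal_config.hugepage_info[0].hugepage_sz = 2ULL * 1024 * 1024;
	internal_config.hugepage_info[0].hugedir = "/mnt/huge";  /* example mount */
	internal_config.hugepage_info[0].num_pages = 32;
}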
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _EAL_LINUXAPP_THREAD_H_ +#define _EAL_LINUXAPP_THREAD_H_ + +/** + * basic loop of thread, called for each thread by eal_init(). + * + * @param arg + * opaque pointer + */ +__attribute__((noreturn)) void *eal_thread_loop(void *arg); + +/** + * Init per-lcore info for master thread + * + * @param lcore_id + * identifier of master lcore + */ +void eal_thread_init_master(unsigned lcore_id); + +#endif /* _EAL_LINUXAPP_PRIVATE_H_ */ diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h new file mode 100644 index 0000000000..15ca209a3f --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h @@ -0,0 +1,56 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_INTERRUPTS_H_ +#error "don't include this file directly, please include generic " +#endif + +#ifndef _RTE_LINUXAPP_INTERRUPTS_H_ +#define _RTE_LINUXAPP_INTERRUPTS_H_ + +enum rte_intr_handle_type { + RTE_INTR_HANDLE_UNKNOWN = 0, + RTE_INTR_HANDLE_UIO, /**< uio device handle */ + RTE_INTR_HANDLE_ALARM, /**< alarm handle */ + RTE_INTR_HANDLE_MAX +}; + +/** Handle for interrupts. */ +struct rte_intr_handle { + int fd; /**< file descriptor */ + enum rte_intr_handle_type type; /**< handle type */ +}; + +#endif /* _RTE_LINUXAPP_INTERRUPTS_H_ */ diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_lcore.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_lcore.h new file mode 100644 index 0000000000..4f14cbba1e --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_lcore.h @@ -0,0 +1,92 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_LCORE_H_ +#error "don't include this file directly, please include generic " +#endif + +#ifndef _RTE_LINUXAPP_LCORE_H_ +#define _RTE_LINUXAPP_LCORE_H_ + +/** + * @file + * API for lcore and socket manipulation in linuxapp environment + */ + +/** + * structure storing internal configuration (per-lcore) + */ +struct lcore_config { + unsigned detected; /**< true if lcore was detected */ + pthread_t thread_id; /**< pthread identifier */ + int pipe_master2slave[2]; /**< communication pipe with master */ + int pipe_slave2master[2]; /**< communication pipe with master */ + lcore_function_t * volatile f; /**< function to call */ + void * volatile arg; /**< argument of function */ + volatile int ret; /**< return value of function */ + volatile enum rte_lcore_state_t state; /**< lcore state */ + unsigned socket_id; /**< physical socket id for this lcore */ +}; + +/** + * internal configuration (per-lcore) + */ +extern struct lcore_config lcore_config[RTE_MAX_LCORE]; + +/** + * Return the ID of the physical socket of the logical core we are + * running on. + */ +static inline unsigned +rte_socket_id(void) +{ + return lcore_config[rte_lcore_id()].socket_id; +} + +/** + * Get the ID of the physical socket of the specified lcore + * + * @param lcore_id + * the targeted lcore, which MUST be between 0 and RTE_MAX_LCORE-1. + * @return + * the ID of lcoreid's physical socket + */ +static inline unsigned +rte_lcore_to_socket_id(unsigned lcore_id) +{ + return lcore_config[lcore_id].socket_id; +} + +#endif /* _RTE_LCORE_H_ */ diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_per_lcore.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_per_lcore.h new file mode 100644 index 0000000000..781cfedc09 --- /dev/null +++ b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_per_lcore.h @@ -0,0 +1,69 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
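/*
 * Illustrative sketch (editor's addition): grouping lcores by physical
 * socket with the accessors defined in rte_lcore.h above. lcore_config and
 * RTE_MAX_LCORE come from this header; the 'detected' flag is set during the
 * EAL CPU discovery.
 */
static unsigned
example_count_lcores_on_socket(unsigned socket)
{
	unsigned lcore_id, count = 0;

	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
		if (lcore_config[lcore_id].detected &&
				rte_lcore_to_socket_id(lcore_id) == socket)
			count++;
	return count;
}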
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_PER_LCORE_H_ +#error "don't include this file directly, please include generic " +#endif + +#ifndef _RTE_LINUXAPP_PER_LCORE_H_ +#define _RTE_LINUXAPP_PER_LCORE_H_ + +/** + * @file + * Per-lcore variables in RTE on linuxapp environment + */ + +#include + +/** + * Macro to define a per lcore variable "var" of type "type", don't + * use keywords like "static" or "volatile" in type, just prefix the + * whole macro. + */ +#define RTE_DEFINE_PER_LCORE(type, name) \ + __thread __typeof__(type) per_lcore_##name + +/** + * Macro to declare an extern per lcore variable "var" of type "type" + */ +#define RTE_DECLARE_PER_LCORE(type, name) \ + extern __thread __typeof__(type) per_lcore_##name + +/** + * Read/write the per-lcore variable value + */ +#define RTE_PER_LCORE(name) (per_lcore_##name) + +#endif /* _RTE_LINUXAPP_PER_LCORE_H_ */ diff --git a/lib/librte_eal/linuxapp/igb_uio/Makefile b/lib/librte_eal/linuxapp/igb_uio/Makefile new file mode 100644 index 0000000000..f52aa7f3b3 --- /dev/null +++ b/lib/librte_eal/linuxapp/igb_uio/Makefile @@ -0,0 +1,55 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
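/*
 * Illustrative sketch (editor's addition): typical use of the per-lcore
 * macros defined in rte_per_lcore.h above. Each pthread (lcore) gets its own
 * copy of the variable, so the update below needs no locking. The variable
 * name is hypothetical.
 */
RTE_DEFINE_PER_LCORE(unsigned, example_pkt_count);

static void
example_count_one_packet(void)
{
	RTE_PER_LCORE(example_pkt_count)++;   /* touches only this lcore's copy */
}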
+# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# module name and path +# +MODULE = igb_uio +MODULE_PATH = drivers/net/igb_uio + +# +# CFLAGS +# +MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=50 +MODULE_CFLAGS += -I$(RTE_OUTPUT)/include +MODULE_CFLAGS += -Winline -Wall -Werror +MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h + +# +# all source are stored in SRCS-y +# +SRCS-y := igb_uio.c + +include $(RTE_SDK)/mk/rte.module.mk diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c new file mode 100644 index 0000000000..51733f6822 --- /dev/null +++ b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c @@ -0,0 +1,402 @@ +/*- + * + * Copyright (c) 2010-2012, Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * GNU GPL V2: http://www.gnu.org/licenses/old-licenses/gpl-2.0.html + * + */ + +#include +#include +#include +#include +#include +#include +#include + +/* Some function names changes between 3.2.0 and 3.3.0... */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) +#define PCI_LOCK pci_block_user_cfg_access +#define PCI_UNLOCK pci_unblock_user_cfg_access +#else +#define PCI_LOCK pci_cfg_access_lock +#define PCI_UNLOCK pci_cfg_access_unlock +#endif + +/** + * MSI-X related macros, copy from linux/pci_regs.h in kernel 2.6.39, + * but none of them in kernel 2.6.35. + */ +#ifndef PCI_MSIX_ENTRY_SIZE +#define PCI_MSIX_ENTRY_SIZE 16 +#define PCI_MSIX_ENTRY_LOWER_ADDR 0 +#define PCI_MSIX_ENTRY_UPPER_ADDR 4 +#define PCI_MSIX_ENTRY_DATA 8 +#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 +#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1 +#endif + +#define IGBUIO_NUM_MSI_VECTORS 1 + +/* interrupt mode */ +enum igbuio_intr_mode { + IGBUIO_LEGACY_INTR_MODE = 0, + IGBUIO_MSI_INTR_MODE, + IGBUIO_MSIX_INTR_MODE, + IGBUIO_INTR_MODE_MAX +}; + +/** + * A structure describing the private information for a uio device. + */ +struct rte_uio_pci_dev { + struct uio_info info; + struct pci_dev *pdev; + spinlock_t lock; /* spinlock for accessing PCI config space or msix data in multi tasks/isr */ + enum igbuio_intr_mode mode; + struct msix_entry \ + msix_entries[IGBUIO_NUM_MSI_VECTORS]; /* pointer to the msix vectors to be allocated later */ +}; + +static const enum igbuio_intr_mode igbuio_intr_mode_preferred = IGBUIO_MSIX_INTR_MODE; + +/* PCI device id table */ +static struct pci_device_id igbuio_pci_ids[] = { +#define RTE_PCI_DEV_ID_DECL(vend, dev) {PCI_DEVICE(vend, dev)}, +#include +{ 0, }, +}; + +static inline struct rte_uio_pci_dev * +igbuio_get_uio_pci_dev(struct uio_info *info) +{ + return container_of(info, struct rte_uio_pci_dev, info); +} + +/** + * It masks the msix on/off of generating MSI-X messages. 
+ */ +static int +igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state) +{ + uint32_t mask_bits = desc->masked; + unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + + PCI_MSIX_ENTRY_VECTOR_CTRL; + + if (state != 0) + mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; + else + mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; + + if (mask_bits != desc->masked) { + writel(mask_bits, desc->mask_base + offset); + readl(desc->mask_base); + desc->masked = mask_bits; + } + + return 0; +} + +/** + * This function sets/clears the masks for generating LSC interrupts. + * + * @param info + * The pointer to struct uio_info. + * @param on + * The on/off flag of masking LSC. + * @return + * -On success, zero value. + * -On failure, a negative value. + */ +static int +igbuio_set_interrupt_mask(struct rte_uio_pci_dev *udev, int32_t state) +{ + struct pci_dev *pdev = udev->pdev; + + if (udev->mode == IGBUIO_MSIX_INTR_MODE) { + struct msi_desc *desc; + + list_for_each_entry(desc, &pdev->msi_list, list) { + igbuio_msix_mask_irq(desc, state); + } + } + else if (udev->mode == IGBUIO_LEGACY_INTR_MODE) { + uint32_t status; + uint16_t old, new; + + pci_read_config_dword(pdev, PCI_COMMAND, &status); + old = status; + if (state != 0) + new = old & (~PCI_COMMAND_INTX_DISABLE); + else + new = old | PCI_COMMAND_INTX_DISABLE; + + if (old != new) + pci_write_config_word(pdev, PCI_COMMAND, new); + } + + return 0; +} + +/** + * This is the irqcontrol callback to be registered to uio_info. + * It can be used to disable/enable interrupt from user space processes. + * + * @param info + * pointer to uio_info. + * @param irq_state + * state value. 1 to enable interrupt, 0 to disable interrupt. + * + * @return + * - On success, 0. + * - On failure, a negative value. + */ +static int +igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state) +{ + unsigned long flags; + struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info); + struct pci_dev *pdev = udev->pdev; + + spin_lock_irqsave(&udev->lock, flags); + PCI_LOCK(pdev); + + igbuio_set_interrupt_mask(udev, irq_state); + + PCI_UNLOCK(pdev); + spin_unlock_irqrestore(&udev->lock, flags); + + return 0; +} + +/** + * This is interrupt handler which will check if the interrupt is for the right device. + * If yes, disable it here and will be enable later. + */ +static irqreturn_t +igbuio_pci_irqhandler(int irq, struct uio_info *info) +{ + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info); + struct pci_dev *pdev = udev->pdev; + uint32_t cmd_status_dword; + uint16_t status; + + spin_lock_irqsave(&udev->lock, flags); + /* block userspace PCI config reads/writes */ + PCI_LOCK(pdev); + + /* for legacy mode, interrupt maybe shared */ + if (udev->mode == IGBUIO_LEGACY_INTR_MODE) { + pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword); + status = cmd_status_dword >> 16; + /* interrupt is not ours, goes to out */ + if (!(status & PCI_STATUS_INTERRUPT)) + goto done; + } + + igbuio_set_interrupt_mask(udev, 0); + ret = IRQ_HANDLED; +done: + /* unblock userspace PCI config reads/writes */ + PCI_UNLOCK(pdev); + spin_unlock_irqrestore(&udev->lock, flags); + printk(KERN_INFO "irq 0x%x %s\n", irq, (ret == IRQ_HANDLED) ? "handled" : "not handled"); + + return ret; +} + +/* Remap pci resources described by bar #pci_bar in uio resource n. 
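/*
 * Illustrative sketch (editor's addition): how a userspace process reaches
 * the irqcontrol/irqhandler pair above through the standard uio character
 * device ABI. Writing a 4-byte integer to /dev/uioX ends up in
 * igbuio_pci_irqcontrol() as irq_state; reading blocks until
 * igbuio_pci_irqhandler() reports IRQ_HANDLED and returns the interrupt
 * count. This is userspace code, shown only to clarify the kernel side.
 */
#include <stdint.h>
#include <unistd.h>

static int
example_uio_wait_for_irq(int uio_fd)
{
	int32_t enable = 1;
	uint32_t count;

	/* re-enable the interrupt via the irqcontrol callback */
	if (write(uio_fd, &enable, sizeof(enable)) != sizeof(enable))
		return -1;
	/* blocks until the next interrupt is handled */
	if (read(uio_fd, &count, sizeof(count)) != sizeof(count))
		return -1;
	return (int)count;
}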
*/ +static int +igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info, + int n, int pci_bar, const char *name) +{ + unsigned long addr, len; + void *internal_addr; + + addr = pci_resource_start(dev, pci_bar); + len = pci_resource_len(dev, pci_bar); + if (addr == 0 || len == 0) + return -1; + internal_addr = ioremap(addr, len); + if (internal_addr == NULL) + return -1; + info->mem[n].name = name; + info->mem[n].addr = addr; + info->mem[n].internal_addr = internal_addr; + info->mem[n].size = len; + info->mem[n].memtype = UIO_MEM_PHYS; + return 0; +} + +/* Unmap previously ioremap'd resources */ +static void +igbuio_pci_release_iomem(struct uio_info *info) +{ + int i; + for (i = 0; i < MAX_UIO_MAPS; i++) { + if (info->mem[i].internal_addr) + iounmap(info->mem[i].internal_addr); + } +} + +static int __devinit +igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct rte_uio_pci_dev *udev; + + udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL); + if (!udev) + return -ENOMEM; + + /* + * enable device: ask low-level code to enable I/O and + * memory + */ + if (pci_enable_device(dev)) { + printk(KERN_ERR "Cannot enable PCI device\n"); + goto fail_free; + } + + /* XXX should we use 64 bits ? */ + /* set 32-bit DMA mask */ + if (pci_set_dma_mask(dev,(uint64_t)0xffffffff)) { + printk(KERN_ERR "Cannot set DMA mask\n"); + goto fail_disable; + } + + /* + * reserve device's PCI memory regions for use by this + * module + */ + if (pci_request_regions(dev, "igb_uio")) { + printk(KERN_ERR "Cannot request regions\n"); + goto fail_disable; + } + + /* enable bus mastering on the device */ + pci_set_master(dev); + + /* remap IO memory */ + if (igbuio_pci_setup_iomem(dev, &udev->info, 0, 0, "config")) + goto fail_release_regions; + + /* fill uio infos */ + udev->info.name = "Intel IGB UIO"; + udev->info.version = "0.1"; + udev->info.handler = igbuio_pci_irqhandler; + udev->info.irqcontrol = igbuio_pci_irqcontrol; + udev->info.priv = udev; + udev->pdev = dev; + udev->mode = 0; /* set the default value for interrupt mode */ + spin_lock_init(&udev->lock); + + /* check if it need to try msix first */ + if (igbuio_intr_mode_preferred == IGBUIO_MSIX_INTR_MODE) { + int vector; + + for (vector = 0; vector < IGBUIO_NUM_MSI_VECTORS; vector ++) + udev->msix_entries[vector].entry = vector; + + if (pci_enable_msix(udev->pdev, udev->msix_entries, IGBUIO_NUM_MSI_VECTORS) == 0) { + udev->mode = IGBUIO_MSIX_INTR_MODE; + } + else { + pci_disable_msix(udev->pdev); + printk(KERN_INFO "fail to enable pci msix, or not enough msix entries\n"); + } + } + switch (udev->mode) { + case IGBUIO_MSIX_INTR_MODE: + udev->info.irq_flags = 0; + udev->info.irq = udev->msix_entries[0].vector; + break; + case IGBUIO_MSI_INTR_MODE: + break; + case IGBUIO_LEGACY_INTR_MODE: + udev->info.irq_flags = IRQF_SHARED; + udev->info.irq = dev->irq; + break; + default: + break; + } + + pci_set_drvdata(dev, udev); + igbuio_pci_irqcontrol(&udev->info, 0); + + /* register uio driver */ + if (uio_register_device(&dev->dev, &udev->info)) + goto fail_release_iomem; + + printk(KERN_INFO "uio device registered with irq %lx\n", udev->info.irq); + + return 0; + +fail_release_iomem: + igbuio_pci_release_iomem(&udev->info); + if (udev->mode == IGBUIO_MSIX_INTR_MODE) + pci_disable_msix(udev->pdev); +fail_release_regions: + pci_release_regions(dev); +fail_disable: + pci_disable_device(dev); +fail_free: + kfree(udev); + + return -ENODEV; +} + +static void +igbuio_pci_remove(struct pci_dev *dev) +{ + struct uio_info *info = 
pci_get_drvdata(dev); + + uio_unregister_device(info); + if (((struct rte_uio_pci_dev *)info->priv)->mode == IGBUIO_MSIX_INTR_MODE) + pci_disable_msix(dev); + pci_release_regions(dev); + pci_disable_device(dev); + pci_set_drvdata(dev, NULL); + kfree(info); +} + +static struct pci_driver igbuio_pci_driver = { + .name = "igb_uio", + .id_table = igbuio_pci_ids, + .probe = igbuio_pci_probe, + .remove = igbuio_pci_remove, +}; + +static int __init +igbuio_pci_init_module(void) +{ + return pci_register_driver(&igbuio_pci_driver); +} + +static void __exit +igbuio_pci_exit_module(void) +{ + pci_unregister_driver(&igbuio_pci_driver); +} + +module_init(igbuio_pci_init_module); +module_exit(igbuio_pci_exit_module); + +MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel Corporation"); diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile new file mode 100644 index 0000000000..ce01fda554 --- /dev/null +++ b/lib/librte_ether/Makefile @@ -0,0 +1,55 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = libethdev.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +SRCS-y += rte_ethdev.c + +# +# Export include files +# +SYMLINK-y-include += rte_ether.h +SYMLINK-y-include += rte_ethdev.h + +# this lib depends upon: +DEPDIRS-y += lib/librte_eal lib/librte_mempool lib/librte_ring lib/librte_mbuf + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c new file mode 100644 index 0000000000..a7a7e681c1 --- /dev/null +++ b/lib/librte_ether/rte_ethdev.c @@ -0,0 +1,1381 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
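/*
 * Illustrative sketch (editor's addition): mapping the PCI BAR that
 * igbuio_pci_setup_iomem() in igb_uio.c above registered as uio map 0. Per
 * the standard uio ABI, map N is selected by passing N * page_size as the
 * mmap offset; the map size is normally read from
 * /sys/class/uio/uioX/maps/map0/size. Userspace code, function name
 * hypothetical.
 */
#include <sys/mman.h>
#include <unistd.h>

static void *
example_map_uio_bar0(int uio_fd, size_t bar_size)
{
	off_t offset = 0 * (off_t)sysconf(_SC_PAGESIZE);   /* map index 0 */

	return mmap(NULL, bar_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			uio_fd, offset);
}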
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_ether.h" +#include "rte_ethdev.h" + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG +#define PMD_DEBUG_TRACE(fmt, args...) do { \ + RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \ + } while (0) +#else +#define PMD_DEBUG_TRACE(fmt, args...) +#endif + +/* define two macros for quick checking for restricting functions to primary + * instance only. First macro is for functions returning an int - and therefore + * an error code, second macro is for functions returning null. + */ +#define PROC_PRIMARY_OR_ERR() do { \ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \ + PMD_DEBUG_TRACE("Cannot run %s in secondary processes\n", \ + __func__); \ + return (-E_RTE_SECONDARY); \ + } \ +} while(0) + +#define PROC_PRIMARY_OR_RET() do { \ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \ + PMD_DEBUG_TRACE("Cannot run %s in secondary processes\n", \ + __func__); \ + return; \ + } \ +} while(0) + +/* Macros to check for invlaid function pointers in dev_ops structure */ +#define FUNC_PTR_OR_ERR_RET(func, retval) do { \ + if ((func) == NULL) { \ + PMD_DEBUG_TRACE("Function not supported\n"); \ + return (retval); \ + } \ +} while(0) +#define FUNC_PTR_OR_RET(func) do { \ + if ((func) == NULL) { \ + PMD_DEBUG_TRACE("Function not supported\n"); \ + return; \ + } \ +} while(0) + +static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; +struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; +static struct rte_eth_dev_data *rte_eth_dev_data = NULL; +static uint8_t nb_ports = 0; + +/* spinlock for eth device callbacks */ +static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; + +/** + * The user application callback description. 
+ * + * It contains callback address to be registered by user application, + * the pointer to the parameters for callback, and the event type. + */ +struct rte_eth_dev_callback { + TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ + rte_eth_dev_cb_fn cb_fn; /**< Callback address */ + void *cb_arg; /**< Parameter for callback */ + enum rte_eth_event_type event; /**< Interrupt event type */ +}; + +static inline void +rte_eth_dev_data_alloc(void) +{ + const unsigned flags = 0; + const struct rte_memzone *mz; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY){ + mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, + RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data), + rte_socket_id(), flags); + } else + mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA); + if (mz == NULL) + rte_panic("Cannot allocate memzone for ethernet port data\n"); + + rte_eth_dev_data = mz->addr; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + memset(rte_eth_dev_data, 0, + RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data)); +} + +static inline struct rte_eth_dev * +rte_eth_dev_allocate(void) +{ + struct rte_eth_dev *eth_dev; + + if (nb_ports == RTE_MAX_ETHPORTS) + return NULL; + + if (rte_eth_dev_data == NULL) + rte_eth_dev_data_alloc(); + + eth_dev = &rte_eth_devices[nb_ports]; + eth_dev->data = &rte_eth_dev_data[nb_ports]; + eth_dev->data->port_id = nb_ports++; + return eth_dev; +} + +static int +rte_eth_dev_init(struct rte_pci_driver *pci_drv, + struct rte_pci_device *pci_dev) +{ + struct eth_driver *eth_drv; + struct rte_eth_dev *eth_dev; + int diag; + + eth_drv = (struct eth_driver *)pci_drv; + + eth_dev = rte_eth_dev_allocate(); + if (eth_dev == NULL) + return -ENOMEM; + + + if (rte_eal_process_type() == RTE_PROC_PRIMARY){ + eth_dev->data->dev_private = rte_zmalloc("ethdev private structure", + eth_drv->dev_private_size, + CACHE_LINE_SIZE); + if (eth_dev->data->dev_private == NULL) + return -ENOMEM; + } + eth_dev->pci_dev = pci_dev; + eth_dev->driver = eth_drv; + eth_dev->data->rx_mbuf_alloc_failed = 0; + + /* init user callbacks */ + TAILQ_INIT(&(eth_dev->callbacks)); + + /* + * Set the default maximum frame size. + */ + eth_dev->data->max_frame_size = ETHER_MAX_LEN; + + /* Invoke PMD device initialization function */ + diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev); + if (diag == 0) + return (0); + + PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%u device_id=0x%x)" + " failed\n", pci_drv->name, + (unsigned) pci_dev->id.vendor_id, + (unsigned) pci_dev->id.device_id); + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + rte_free(eth_dev->data->dev_private); + nb_ports--; + return diag; +} + +/** + * Register an Ethernet [Poll Mode] driver. + * + * Function invoked by the initialization function of an Ethernet driver + * to simultaneously register itself as a PCI driver and as an Ethernet + * Poll Mode Driver. + * Invokes the rte_eal_pci_register() function to register the *pci_drv* + * structure embedded in the *eth_drv* structure, after having stored the + * address of the rte_eth_dev_init() function in the *devinit* field of + * the *pci_drv* structure. + * During the PCI probing phase, the rte_eth_dev_init() function is + * invoked for each PCI [Ethernet device] matching the embedded PCI + * identifiers provided by the driver. 
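/*
 * Illustrative sketch (editor's addition): how a poll mode driver would use
 * the registration path described above and implemented by
 * rte_eth_driver_register() just below. All example_pmd_* names are
 * hypothetical placeholders for what a real PMD (e.g. igb or ixgbe) provides.
 */
struct example_pmd_adapter {
	int dummy;                      /* hypothetical per-port private data */
};

static struct rte_pci_id example_pmd_pci_ids[] = {
	{ 0 },                          /* placeholder: real drivers list vendor/device ids */
};

static int
example_pmd_dev_init(struct eth_driver *drv, struct rte_eth_dev *dev)
{
	(void)drv;
	(void)dev;
	return 0;   /* a real PMD sets dev->dev_ops, rx/tx burst functions, MAC addrs */
}

static struct eth_driver example_pmd_driver = {
	.pci_drv = {
		.name = "example_pmd",
		.id_table = example_pmd_pci_ids,
		/* .devinit is filled in by rte_eth_driver_register() */
	},
	.eth_dev_init = example_pmd_dev_init,
	.dev_private_size = sizeof(struct example_pmd_adapter),
};

void
example_pmd_init(void)
{
	rte_eth_driver_register(&example_pmd_driver);
}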
+ */ +void +rte_eth_driver_register(struct eth_driver *eth_drv) +{ + eth_drv->pci_drv.devinit = rte_eth_dev_init; + rte_eal_pci_register(ð_drv->pci_drv); +} + +uint8_t +rte_eth_dev_count(void) +{ + return (nb_ports); +} + +int +rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, + const struct rte_eth_conf *dev_conf) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + int diag; + + /* This function is only safe when called from the primary process + * in a multi-process setup*/ + PROC_PRIMARY_OR_ERR(); + + if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-EINVAL); + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); + + if (dev->data->dev_started) { + PMD_DEBUG_TRACE( + "port %d must be stopped to allow configuration", port_id); + return -EBUSY; + } + + /* + * Check that the numbers of RX and TX queues are not greater + * than the maximum number of RX and TX queues supported by the + * configured device. + */ + (*dev->dev_ops->dev_infos_get)(dev, &dev_info); + if (nb_rx_q > dev_info.max_rx_queues) { + PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d", + port_id, nb_rx_q, dev_info.max_rx_queues); + return (-EINVAL); + } + if (nb_rx_q == 0) { + PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0", port_id); + return (-EINVAL); + } + + if (nb_tx_q > dev_info.max_tx_queues) { + PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d", + port_id, nb_tx_q, dev_info.max_tx_queues); + return (-EINVAL); + } + if (nb_tx_q == 0) { + PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0", port_id); + return (-EINVAL); + } + + /* Copy the dev_conf parameter into the dev structure */ + memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf)); + + /* + * If jumbo frames are enabled, check that the maximum RX packet + * length is supported by the configured device. + */ + if (dev_conf->rxmode.jumbo_frame == 1) { + if (dev_conf->rxmode.max_rx_pkt_len > + dev_info.max_rx_pktlen) { + PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u" + " > max valid value %u", + port_id, + (unsigned)dev_conf->rxmode.max_rx_pkt_len, + (unsigned)dev_info.max_rx_pktlen); + return (-EINVAL); + } + } else + /* Use default value */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN; + + /* For vmdb+dcb mode check our configuration before we go further */ + if (dev_conf->rxmode.mq_mode == ETH_VMDQ_DCB) { + const struct rte_eth_vmdq_dcb_conf *conf; + + if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) { + PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q " + "!= %d", + port_id, ETH_VMDQ_DCB_NUM_QUEUES); + return (-EINVAL); + } + conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf); + if (! 
(conf->nb_queue_pools == ETH_16_POOLS || + conf->nb_queue_pools == ETH_32_POOLS)) { + PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, " + "nb_queue_pools != %d or nb_queue_pools " + "!= %d", + port_id, ETH_16_POOLS, ETH_32_POOLS); + return (-EINVAL); + } + } + + diag = (*dev->dev_ops->dev_configure)(dev, nb_rx_q, nb_tx_q); + if (diag != 0) { + rte_free(dev->data->rx_queues); + rte_free(dev->data->tx_queues); + } + return diag; +} + +static void +rte_eth_dev_config_restore(uint8_t port_id) +{ + struct rte_eth_dev *dev; + struct rte_eth_dev_info dev_info; + struct ether_addr addr; + uint16_t i; + + dev = &rte_eth_devices[port_id]; + + rte_eth_dev_info_get(port_id, &dev_info); + + /* replay MAC address configuration */ + for (i = 0; i < dev_info.max_mac_addrs; i++) { + addr = dev->data->mac_addrs[i]; + + /* skip zero address */ + if (is_zero_ether_addr(&addr)) + continue; + + /* add address to the hardware */ + if (*dev->dev_ops->mac_addr_add) + (*dev->dev_ops->mac_addr_add)(dev, &addr, i, 0); + else { + PMD_DEBUG_TRACE("port %d: MAC address array not supported\n", + port_id); + /* exit the loop but not return an error */ + break; + } + } + + /* replay promiscuous configuration */ + if (rte_eth_promiscuous_get(port_id) == 1) + rte_eth_promiscuous_enable(port_id); + else if (rte_eth_promiscuous_get(port_id) == 0) + rte_eth_promiscuous_disable(port_id); + + /* replay allmulticast configuration */ + if (rte_eth_allmulticast_get(port_id) == 1) + rte_eth_allmulticast_enable(port_id); + else if (rte_eth_allmulticast_get(port_id) == 0) + rte_eth_allmulticast_disable(port_id); +} + +int +rte_eth_dev_start(uint8_t port_id) +{ + struct rte_eth_dev *dev; + int diag; + + /* This function is only safe when called from the primary process + * in a multi-process setup*/ + PROC_PRIMARY_OR_ERR(); + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-EINVAL); + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); + diag = (*dev->dev_ops->dev_start)(dev); + if (diag == 0) + dev->data->dev_started = 1; + else + return diag; + + rte_eth_dev_config_restore(port_id); + + return 0; +} + +void +rte_eth_dev_stop(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + /* This function is only safe when called from the primary process + * in a multi-process setup*/ + PROC_PRIMARY_OR_RET(); + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop); + dev->data->dev_started = 0; + (*dev->dev_ops->dev_stop)(dev); +} + +void +rte_eth_dev_close(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + /* This function is only safe when called from the primary process + * in a multi-process setup*/ + PROC_PRIMARY_OR_RET(); + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_RET(*dev->dev_ops->dev_close); + dev->data->dev_started = 0; + (*dev->dev_ops->dev_close)(dev); +} + +int +rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct rte_eth_dev *dev; + struct rte_pktmbuf_pool_private *mbp_priv; + struct rte_eth_dev_info dev_info; + + /* This function is only safe when called from the primary process + * in a multi-process setup*/ + PROC_PRIMARY_OR_ERR(); + + if (port_id >= nb_ports) { + 
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-EINVAL); + } + dev = &rte_eth_devices[port_id]; + if (rx_queue_id >= dev->data->nb_rx_queues) { + PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id); + return (-EINVAL); + } + + if (dev->data->dev_started) { + PMD_DEBUG_TRACE( + "port %d must be stopped to allow configuration", port_id); + return -EBUSY; + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); + + /* + * Check the size of the mbuf data buffer. + * This value must be provided in the private data of the memory pool. + * First check that the memory pool has a valid private data. + */ + (*dev->dev_ops->dev_infos_get)(dev, &dev_info); + if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { + PMD_DEBUG_TRACE("%s private_data_size %d < %d\n", + mp->name, (int) mp->private_data_size, + (int) sizeof(struct rte_pktmbuf_pool_private)); + return (-ENOSPC); + } + mbp_priv = (struct rte_pktmbuf_pool_private *) + ((char *)mp + sizeof(struct rte_mempool)); + if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) < + dev_info.min_rx_bufsize) { + PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d " + "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)" + "=%d)\n", + mp->name, + (int)mbp_priv->mbuf_data_room_size, + (int)(RTE_PKTMBUF_HEADROOM + + dev_info.min_rx_bufsize), + (int)RTE_PKTMBUF_HEADROOM, + (int)dev_info.min_rx_bufsize); + return (-EINVAL); + } + + return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc, + socket_id, rx_conf, mp); +} + +int +rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct rte_eth_dev *dev; + + /* This function is only safe when called from the primary process + * in a multi-process setup*/ + PROC_PRIMARY_OR_ERR(); + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-EINVAL); + } + dev = &rte_eth_devices[port_id]; + if (tx_queue_id >= dev->data->nb_tx_queues) { + PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id); + return (-EINVAL); + } + + if (dev->data->dev_started) { + PMD_DEBUG_TRACE( + "port %d must be stopped to allow configuration", port_id); + return -EBUSY; + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); + return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc, + socket_id, tx_conf); +} + +void +rte_eth_promiscuous_enable(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable); + (*dev->dev_ops->promiscuous_enable)(dev); + dev->data->promiscuous = 1; +} + +void +rte_eth_promiscuous_disable(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable); + dev->data->promiscuous = 0; + (*dev->dev_ops->promiscuous_disable)(dev); +} + +int +rte_eth_promiscuous_get(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return -1; + } + + dev = &rte_eth_devices[port_id]; + return dev->data->promiscuous; +} + +void +rte_eth_allmulticast_enable(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + 
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable); + (*dev->dev_ops->allmulticast_enable)(dev); + dev->data->all_multicast = 1; +} + +void +rte_eth_allmulticast_disable(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable); + dev->data->all_multicast = 0; + (*dev->dev_ops->allmulticast_disable)(dev); +} + +int +rte_eth_allmulticast_get(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return -1; + } + + dev = &rte_eth_devices[port_id]; + return dev->data->all_multicast; +} + +static inline int +rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +void +rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_RET(*dev->dev_ops->link_update); + + if (dev->data->dev_conf.intr_conf.lsc != 0) + rte_eth_dev_atomic_read_link_status(dev, eth_link); + else { + (*dev->dev_ops->link_update)(dev, 1); + *eth_link = dev->data->dev_link; + } +} + +void +rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + FUNC_PTR_OR_RET(*dev->dev_ops->link_update); + + if (dev->data->dev_conf.intr_conf.lsc != 0) + rte_eth_dev_atomic_read_link_status(dev, eth_link); + else { + (*dev->dev_ops->link_update)(dev, 0); + *eth_link = dev->data->dev_link; + } +} + +void +rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_RET(*dev->dev_ops->stats_get); + (*dev->dev_ops->stats_get)(dev, stats); + stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; +} + +void +rte_eth_stats_reset(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset); + (*dev->dev_ops->stats_reset)(dev); +} + +void +rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); + (*dev->dev_ops->dev_infos_get)(dev, dev_info); + dev_info->pci_dev = dev->pci_dev; + dev_info->driver_name = dev->driver->pci_drv.name; +} + +void +rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return; + } + dev = &rte_eth_devices[port_id]; + ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); +} + +int 
+rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + dev = &rte_eth_devices[port_id]; + if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) { + PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id); + return (-ENOSYS); + } + if (vlan_id > 4095) { + PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n", + port_id, (unsigned) vlan_id); + return (-EINVAL); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP); + (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on); + return (0); +} + +int +rte_eth_dev_fdir_add_signature_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter, + uint8_t queue) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) { + PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", + port_id, dev->data->dev_conf.fdir_conf.mode); + return (-ENOSYS); + } + + if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP + || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) + && (fdir_filter->port_src || fdir_filter->port_dst)) { + PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \ + "None l4type source & destinations ports " \ + "should be null!"); + return (-EINVAL); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP); + if (*dev->dev_ops->fdir_add_signature_filter) + return (*dev->dev_ops->fdir_add_signature_filter)(dev, + fdir_filter, + queue); + + PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id); + return (-ENOTSUP); +} + +int +rte_eth_dev_fdir_update_signature_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter, + uint8_t queue) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) { + PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", + port_id, dev->data->dev_conf.fdir_conf.mode); + return (-ENOSYS); + } + + if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP + || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) + && (fdir_filter->port_src || fdir_filter->port_dst)) { + PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \ + "None l4type source & destinations ports " \ + "should be null!"); + return (-EINVAL); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP); + if (*dev->dev_ops->fdir_update_signature_filter) + return (*dev->dev_ops->fdir_update_signature_filter)(dev, + fdir_filter, + queue); + + + PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id); + return (-ENOTSUP); +} + +int +rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) { + PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", + port_id, dev->data->dev_conf.fdir_conf.mode); + return (-ENOSYS); + } + + if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP + || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) + && (fdir_filter->port_src || fdir_filter->port_dst)) { + PMD_DEBUG_TRACE(" Port are 
meaningless for SCTP and " \ + "None l4type source & destinations ports " \ + "should be null!"); + return (-EINVAL); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP); + if (*dev->dev_ops->fdir_remove_signature_filter) + return (*dev->dev_ops->fdir_remove_signature_filter)(dev, + fdir_filter); + + PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id); + return (-ENOTSUP); +} + +int +rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + if (! (dev->data->dev_conf.fdir_conf.mode)) { + PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id); + return (-ENOSYS); + } + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP); + if (*dev->dev_ops->fdir_infos_get) { + (*dev->dev_ops->fdir_infos_get)(dev, fdir); + return (0); + } + + PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id); + return (-ENOTSUP); +} + +int +rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter, + uint16_t soft_id, uint8_t queue, + uint8_t drop) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { + PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", + port_id, dev->data->dev_conf.fdir_conf.mode); + return (-ENOSYS); + } + + if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP + || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) + && (fdir_filter->port_src || fdir_filter->port_dst)) { + PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \ + "None l4type source & destinations ports " \ + "should be null!"); + return (-EINVAL); + } + + /* For now IPv6 is not supported with perfect filter */ + if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) + return (-ENOTSUP); + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP); + if (*dev->dev_ops->fdir_add_perfect_filter) + return (*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter, + soft_id, queue, + drop); + + PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id); + return (-ENOTSUP); +} + +int +rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter, + uint16_t soft_id, uint8_t queue, + uint8_t drop) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { + PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", + port_id, dev->data->dev_conf.fdir_conf.mode); + return (-ENOSYS); + } + + if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP + || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) + && (fdir_filter->port_src || fdir_filter->port_dst)) { + PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \ + "None l4type source & destinations ports " \ + "should be null!"); + return (-EINVAL); + } + + /* For now IPv6 is not supported with perfect filter */ + if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) + return (-ENOTSUP); + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP); + if (*dev->dev_ops->fdir_update_perfect_filter) + return (*dev->dev_ops->fdir_update_perfect_filter)(dev, + fdir_filter, + soft_id, + queue, + drop); + + 
PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id); + return (-ENOTSUP); +} + +int +rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter, + uint16_t soft_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) { + PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n", + port_id, dev->data->dev_conf.fdir_conf.mode); + return (-ENOSYS); + } + + if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP + || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) + && (fdir_filter->port_src || fdir_filter->port_dst)) { + PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \ + "None l4type source & destinations ports " \ + "should be null!"); + return (-EINVAL); + } + + /* For now IPv6 is not supported with perfect filter */ + if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) + return (-ENOTSUP); + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP); + if (*dev->dev_ops->fdir_remove_perfect_filter) + return (*dev->dev_ops->fdir_remove_perfect_filter)(dev, + fdir_filter, + soft_id); + + PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", port_id); + return -ENOTSUP; +} + +int +rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + if (! (dev->data->dev_conf.fdir_conf.mode)) { + PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id); + return (-ENOSYS); + } + + /* IPv6 mask are not supported */ + if (fdir_mask->src_ipv6_mask) + return (-ENOTSUP); + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP); + if (*dev->dev_ops->fdir_set_masks) + return (*dev->dev_ops->fdir_set_masks)(dev, fdir_mask); + + PMD_DEBUG_TRACE("port %d: FDIR feature not supported\n", + port_id); + return -ENOTSUP; +} + +int +rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) { + PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n"); + return (-EINVAL); + } + + dev = &rte_eth_devices[port_id]; + + /* High water, low water validation are device specific */ + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP); + if (*dev->dev_ops->flow_ctrl_set) + return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf); + + return -ENOTSUP; +} + +int +rte_eth_led_on(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP); + return ((*dev->dev_ops->dev_led_on)(dev)); +} + +int +rte_eth_led_off(uint8_t port_id) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP); + return ((*dev->dev_ops->dev_led_off)(dev)); +} + +/* + * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find + * an empty spot. 
+ */ +static inline int +get_mac_addr_index(uint8_t port_id, struct ether_addr *addr) +{ + struct rte_eth_dev_info dev_info; + struct rte_eth_dev *dev = &rte_eth_devices[port_id]; + unsigned i; + + rte_eth_dev_info_get(port_id, &dev_info); + + for (i = 0; i < dev_info.max_mac_addrs; i++) + if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0) + return i; + + return -1; +} + +static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}}; + +int +rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, + uint32_t pool) +{ + struct rte_eth_dev *dev; + int index; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); + if (is_zero_ether_addr(addr)) { + PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", port_id); + return (-EINVAL); + } + + /* Check if it's already there, and do nothing */ + index = get_mac_addr_index(port_id, addr); + if (index >= 0) + return 0; + + index = get_mac_addr_index(port_id, &null_mac_addr); + if (index < 0) { + PMD_DEBUG_TRACE("port %d: MAC address array full\n", port_id); + return (-ENOSPC); + } + + /* Update NIC */ + (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); + + /* Update address in NIC data structure */ + ether_addr_copy(addr, &dev->data->mac_addrs[index]); + + return 0; +} + +int +rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr) +{ + struct rte_eth_dev *dev; + int index; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return (-ENODEV); + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP); + index = get_mac_addr_index(port_id, addr); + if (index == 0) { + PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id); + return (-EADDRINUSE); + } else if (index < 0) + return 0; /* Do nothing if address wasn't found */ + + /* Update NIC */ + (*dev->dev_ops->mac_addr_remove)(dev, index); + + /* Update address in NIC data structure */ + ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); + + return 0; +} + +#ifdef RTE_LIBRTE_ETHDEV_DEBUG +uint16_t +rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return 0; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, -ENOTSUP); + if (queue_id >= dev->data->nb_rx_queues) { + PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id); + return 0; + } + return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id], + rx_pkts, nb_pkts); +} + +uint16_t +rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct rte_eth_dev *dev; + + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return 0; + } + dev = &rte_eth_devices[port_id]; + + FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, -ENOTSUP); + if (queue_id >= dev->data->nb_tx_queues) { + PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id); + return 0; + } + return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], + tx_pkts, nb_pkts); +} +#endif + +int +rte_eth_dev_callback_register(uint8_t port_id, + enum rte_eth_event_type event, + rte_eth_dev_cb_fn cb_fn, void *cb_arg) +{ + int ret = -1; + struct rte_eth_dev *dev; + struct rte_eth_dev_callback *user_cb = NULL; + + if (!cb_fn) + return -1; + if 
(port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return -1; + } + dev = &rte_eth_devices[port_id]; + rte_spinlock_lock(&rte_eth_dev_cb_lock); + TAILQ_FOREACH(user_cb, &(dev->callbacks), next) { + if (user_cb->cb_fn == cb_fn && + user_cb->cb_arg == cb_arg && + user_cb->event == event) { + ret = 0; + goto out; + } + } + user_cb = rte_malloc("INTR_USER_CALLBACK", + sizeof(struct rte_eth_dev_callback), 0); + if (!user_cb) + goto out; + user_cb->cb_fn = cb_fn; + user_cb->cb_arg = cb_arg; + user_cb->event = event; + TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next); + ret = 0; + +out: + rte_spinlock_unlock(&rte_eth_dev_cb_lock); + + return ret; +} + +int +rte_eth_dev_callback_unregister(uint8_t port_id, + enum rte_eth_event_type event, + rte_eth_dev_cb_fn cb_fn, void *cb_arg) +{ + int ret = -1; + struct rte_eth_dev *dev; + struct rte_eth_dev_callback *cb_lst = NULL; + + if (!cb_fn) + return -1; + if (port_id >= nb_ports) { + PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id); + return -1; + } + dev = &rte_eth_devices[port_id]; + rte_spinlock_lock(&rte_eth_dev_cb_lock); + TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) { + if (cb_lst->cb_fn != cb_fn || cb_lst->event != event) + continue; + if (cb_lst->cb_arg == (void *)-1 || + cb_lst->cb_arg == cb_arg) { + TAILQ_REMOVE(&(dev->callbacks), cb_lst, next); + rte_free(cb_lst); + ret = 0; + } + } + + rte_spinlock_unlock(&rte_eth_dev_cb_lock); + + return ret; +} + +void +_rte_eth_dev_callback_process(struct rte_eth_dev *dev, enum rte_eth_event_type event) +{ + struct rte_eth_dev_callback *cb_lst = NULL; + struct rte_eth_dev_callback dev_cb; + + rte_spinlock_lock(&rte_eth_dev_cb_lock); + TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) { + if (cb_lst->cb_fn == NULL || cb_lst->event != event) + continue; + dev_cb = *cb_lst; + rte_spinlock_unlock(&rte_eth_dev_cb_lock); + dev_cb.cb_fn(dev->data->port_id, dev_cb.event, + dev_cb.cb_arg); + rte_spinlock_lock(&rte_eth_dev_cb_lock); + } + rte_spinlock_unlock(&rte_eth_dev_cb_lock); +} + diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h new file mode 100644 index 0000000000..b5b6c9e6a1 --- /dev/null +++ b/lib/librte_ether/rte_ethdev.h @@ -0,0 +1,1809 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_ETHDEV_H_ +#define _RTE_ETHDEV_H_ + +/** + * @file + * + * RTE Ethernet Device API + * + * The Ethernet Device API is composed of two parts: + * + * - The application-oriented Ethernet API that includes functions to setup + * an Ethernet device (configure it, setup its RX and TX queues and start it), + * to get its MAC address, the speed and the status of its physical link, + * to receive and to transmit packets, and so on. + * + * - The driver-oriented Ethernet API that exports a function allowing + * an Ethernet Poll Mode Driver (PMD) to simultaneously register itself as + * an Ethernet device driver and as a PCI driver for a set of matching PCI + * [Ethernet] devices classes. + * + * By default, all the functions of the Ethernet Device API exported by a PMD + * are lock-free functions which assume to not be invoked in parallel on + * different logical cores to work on the same target object. For instance, + * the receive function of a PMD cannot be invoked in parallel on two logical + * cores to poll the same RX queue [of the same port]. Of course, this function + * can be invoked in parallel by different logical cores on different RX queues. + * It is the responsibility of the upper level application to enforce this rule. + * + * If needed, parallel accesses by multiple logical cores to shared queues + * shall be explicitly protected by dedicated inline lock-aware functions + * built on top of their corresponding lock-free functions of the PMD API. + * + * In all functions of the Ethernet API, the Ethernet device is + * designated by an integer >= 0 named the device port identifier. + * + * At the Ethernet driver level, Ethernet devices are represented by a generic + * data structure of type *rte_eth_dev*. + * + * Ethernet devices are dynamically registered during the PCI probing phase + * performed at EAL initialization time. + * When an Ethernet device is being probed, an *rte_eth_dev* structure and + * a new port identifier are allocated for that device. Then, the eth_dev_init() + * function supplied by the Ethernet driver matching the probed PCI + * device is invoked to properly initialize the device. + * + * The role of the device init function consists of resetting the hardware, + * checking access to Non-volatile Memory (NVM), reading the MAC address + * from NVM etc. + * + * If the device init operation is successful, the correspondence between + * the port identifier assigned to the new device and its associated + * *rte_eth_dev* structure is effectively registered. + * Otherwise, both the *rte_eth_dev* structure and the port identifier are + * freed. 
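To make the probing model described here concrete, the fragment below is an illustrative sketch (not part of the patch itself) that walks every port registered during EAL initialization and prints its driver and MAC address. rte_eth_dev_count(), rte_eth_dev_info_get() and rte_eth_macaddr_get() are declared later in this header; the addr_bytes[] layout is assumed from rte_ether.h.

#include <stdio.h>
#include <rte_ethdev.h>

static void
list_probed_ports(void)
{
	uint8_t nb_ports = rte_eth_dev_count();
	uint8_t port_id;

	for (port_id = 0; port_id < nb_ports; port_id++) {
		struct rte_eth_dev_info info;
		struct ether_addr mac;

		/* Query the generic device information and link address
		 * registered for this port during PCI probing. */
		rte_eth_dev_info_get(port_id, &info);
		rte_eth_macaddr_get(port_id, &mac);
		printf("port %u: driver %s, up to %u RX / %u TX queues, "
		       "MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
		       port_id, info.driver_name,
		       (unsigned)info.max_rx_queues,
		       (unsigned)info.max_tx_queues,
		       mac.addr_bytes[0], mac.addr_bytes[1], mac.addr_bytes[2],
		       mac.addr_bytes[3], mac.addr_bytes[4], mac.addr_bytes[5]);
	}
}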
+ * + * The functions exported by the application Ethernet API to setup a device + * designated by its port identifier must be invoked in the following order: + * - rte_eth_dev_configure() + * - rte_eth_tx_queue_setup() + * - rte_eth_rx_queue_setup() + * - rte_eth_dev_start() + * + * Then, the network application can invoke, in any order, the functions + * exported by the Ethernet API to get the MAC address of a given device, to + * get the speed and the status of a device physical link, to receive/transmit + * [burst of] packets, and so on. + * + * If the application wants to change the configuration (i.e. call + * rte_eth_dev_configure(), rte_eth_tx_queue_setup(), or + * rte_eth_rx_queue_setup()), it must call rte_eth_dev_stop() first to stop the + * device and then do the reconfiguration before calling rte_eth_dev_start() + * again. The tramsit and receive functions should not be invoked when the + * device is stopped. + * + * Please note that some configuration is not stored between calls to + * rte_eth_dev_stop()/rte_eth_dev_start(). The following configuration will + * be retained: + * + * - flow control settings + * - receive mode configuration (promiscuous mode, hardware checksum mode, + * RSS/VMDQ settings etc.) + * - VLAN filtering configuration + * - MAC addresses supplied to MAC address array + * - flow director filtering mode (but not filtering rules) + * + * Any other configuration will not be stored and will need to be re-entered + * after a call to rte_eth_dev_start(). + * + * + * Finally, a network application can close an Ethernet device by invoking the + * rte_eth_dev_close() function. + * + * Each function of the application Ethernet API invokes a specific function + * of the PMD that controls the target device designated by its port + * identifier. + * For this purpose, all device-specific functions of an Ethernet driver are + * supplied through a set of pointers contained in a generic structure of type + * *eth_dev_ops*. + * The address of the *eth_dev_ops* structure is stored in the *rte_eth_dev* + * structure by the device init function of the Ethernet driver, which is + * invoked during the PCI probing phase, as explained earlier. + * + * In other words, each function of the Ethernet API simply retrieves the + * *rte_eth_dev* structure associated with the device port identifier and + * performs an indirect invocation of the corresponding driver function + * supplied in the *eth_dev_ops* structure of the *rte_eth_dev* structure. + * + * For performance reasons, the address of the burst-oriented RX and TX + * functions of the Ethernet driver are not contained in the *eth_dev_ops* + * structure. Instead, they are directly stored at the beginning of the + * *rte_eth_dev* structure to avoid an extra indirect memory access during + * their invocation. + * + * RTE ethernet device drivers do not use interrupts for transmitting or + * receiving. Instead, Ethernet drivers export Poll-Mode receive and transmit + * functions to applications. + * Both receive and transmit functions are packet-burst oriented to minimize + * their cost per packet through the following optimizations: + * + * - Sharing among multiple packets the incompressible cost of the + * invocation of receive/transmit functions. 
+ * + * - Enabling receive/transmit functions to take advantage of burst-oriented + * hardware features (L1 cache, prefetch instructions, NIC head/tail + * registers) to minimize the number of CPU cycles per packet, for instance, + * by avoiding useless read memory accesses to ring descriptors, or by + * systematically using arrays of pointers that exactly fit L1 cache line + * boundaries and sizes. + * + * The burst-oriented receive function does not provide any error notification, + * to avoid the corresponding overhead. As a hint, the upper-level application + * might check the status of the device link once being systematically returned + * a 0 value by the receive function of the driver for a given number of tries. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#include +#include +#include +#include +#include "rte_ether.h" + +/** + * A structure used to retrieve statistics for an Ethernet port. + */ +struct rte_eth_stats { + uint64_t ipackets; /**< Total number of successfully received packets. */ + uint64_t opackets; /**< Total number of successfully transmitted packets.*/ + uint64_t ibytes; /**< Total number of successfully received bytes. */ + uint64_t obytes; /**< Total number of successfully transmitted bytes. */ + uint64_t ierrors; /**< Total number of erroneous received packets. */ + uint64_t oerrors; /**< Total number of failed transmitted packets. */ + uint64_t imcasts; /**< Total number of multicast received packets. */ + uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */ + uint64_t fdirmatch; /**< Total number of RX packets matching a filter. */ + uint64_t fdirmiss; /**< Total number of RX packets not matching any filter. */ +}; + +/** + * A structure used to retrieve link-level information of an Ethernet port. + */ +struct rte_eth_link { + uint16_t link_speed; /**< ETH_LINK_SPEED_[10, 100, 1000, 10000] */ + uint16_t link_duplex; /**< ETH_LINK_[HALF_DUPLEX, FULL_DUPLEX] */ + uint8_t link_status : 1; /**< 1 -> link up, 0 -> link down */ +}__attribute__((aligned(8))); /**< aligned for atomic64 read/write */ + +#define ETH_LINK_SPEED_AUTONEG 0 /**< Auto-negotiate link speed. */ +#define ETH_LINK_SPEED_10 10 /**< 10 megabits/second. */ +#define ETH_LINK_SPEED_100 100 /**< 100 megabits/second. */ +#define ETH_LINK_SPEED_1000 1000 /**< 1 gigabits/second. */ +#define ETH_LINK_SPEED_10000 10000 /**< 10 gigabits/second. */ + +#define ETH_LINK_AUTONEG_DUPLEX 0 /**< Auto-negotiate duplex. */ +#define ETH_LINK_HALF_DUPLEX 1 /**< Half-duplex connection. */ +#define ETH_LINK_FULL_DUPLEX 2 /**< Full-duplex connection. */ + +/** + * A structure used to configure the ring threshold registers of an RX/TX + * queue for an Ethernet port. + */ +struct rte_eth_thresh { + uint8_t pthresh; /**< Ring prefetch threshold. */ + uint8_t hthresh; /**< Ring host threshold. */ + uint8_t wthresh; /**< Ring writeback threshold. */ +}; + +/** + * A set of values to identify what method is to be used to route + * packets to multiple queues. + */ +enum rte_eth_rx_mq_mode { + ETH_RSS = 0, /**< Default to RSS mode */ + ETH_VMDQ_DCB /**< Use VMDQ+DCB to route traffic to queues */ +}; + +/** + * A structure used to configure the RX features of an Ethernet port. + */ +struct rte_eth_rxmode { + /** The multi-queue packet distribution mode to be used, e.g. RSS. */ + enum rte_eth_rx_mq_mode mq_mode; + uint32_t max_rx_pkt_len; /**< Only used if jumbo_frame enabled. 
*/ + uint16_t split_hdr_size; /**< hdr buf size (header_split enabled).*/ + uint8_t header_split : 1, /**< Header Split enable. */ + hw_ip_checksum : 1, /**< IP/UDP/TCP checksum offload enable. */ + hw_vlan_filter : 1, /**< VLAN filter enable. */ + jumbo_frame : 1, /**< Jumbo Frame Receipt enable. */ + hw_strip_crc : 1; /**< Enable CRC stripping by hardware. */ +}; + +/** + * A structure used to configure the Receive Side Scaling (RSS) feature + * of an Ethernet port. + * If not NULL, the *rss_key* pointer of the *rss_conf* structure points + * to an array of 40 bytes holding the RSS key to use for hashing specific + * header fields of received packets. + * Otherwise, a default random hash key is used by the device driver. + * + * The *rss_hf* field of the *rss_conf* structure indicates the different + * types of IPv4/IPv6 packets to which the RSS hashing must be applied. + * Supplying an *rss_hf* equal to zero disables the RSS feature. + */ +struct rte_eth_rss_conf { + uint8_t *rss_key; /**< If not NULL, 40-byte hash key. */ + uint16_t rss_hf; /**< Hash functions to apply - see below. */ +}; + +#define ETH_RSS_IPV4 0x0001 /**< IPv4 packet. */ +#define ETH_RSS_IPV4_TCP 0x0002 /**< IPv4/TCP packet. */ +#define ETH_RSS_IPV6 0x0004 /**< IPv6 packet. */ +#define ETH_RSS_IPV6_EX 0x0008 /**< IPv6 packet with extension headers.*/ +#define ETH_RSS_IPV6_TCP 0x0010 /**< IPv6/TCP packet. */ +#define ETH_RSS_IPV6_TCP_EX 0x0020 /**< IPv6/TCP with extension headers. */ +/* Intel RSS extensions to UDP packets */ +#define ETH_RSS_IPV4_UDP 0x0040 /**< IPv4/UDP packet. */ +#define ETH_RSS_IPV6_UDP 0x0080 /**< IPv6/UDP packet. */ +#define ETH_RSS_IPV6_UDP_EX 0x0100 /**< IPv6/UDP with extension headers. */ + +/* Definitions used for VMDQ and DCB functionality */ +#define ETH_VMDQ_MAX_VLAN_FILTERS 64 /**< Maximum nb. of VMDQ vlan filters. */ +#define ETH_DCB_NUM_USER_PRIORITIES 8 /**< Maximum nb. of DCB priorities. */ +#define ETH_VMDQ_DCB_NUM_QUEUES 128 /**< Maximum nb. of VMDQ DCB queues. */ + +/** + * This enum indicates the possible number of queue pools + * in VMDQ+DCB configurations. + */ +enum rte_eth_nb_pools { + ETH_16_POOLS = 16, /**< 16 pools with DCB. */ + ETH_32_POOLS = 32 /**< 32 pools with DCB. */ +}; + +/** + * A structure used to configure the VMDQ+DCB feature + * of an Ethernet port. + * + * Using this feature, packets are routed to a pool of queues, based + * on the vlan id in the vlan tag, and then to a specific queue within + * that pool, using the user priority vlan tag field. + * + * A default pool may be used, if desired, to route all traffic which + * does not match the vlan filter rules. + */ +struct rte_eth_vmdq_dcb_conf { + enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */ + uint8_t enable_default_pool; /**< If non-zero, use a default pool */ + uint8_t default_pool; /**< The default pool, if applicable */ + uint8_t nb_pool_maps; /**< We can have up to 64 filters/mappings */ + struct { + uint16_t vlan_id; /**< The vlan id of the received frame */ + uint64_t pools; /**< Bitmask of pools for packet rx */ + } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */ + uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES]; + /**< Selects a queue in a pool */ +}; + +/** + * A structure used to configure the TX features of an Ethernet port. + * For future extensions. + */ +struct rte_eth_txmode { +}; + +/** + * A structure used to configure an RX ring of an Ethernet port. 
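As a concrete illustration of how the RX-mode and RSS structures fit together, here is a hedged sketch of a port configuration initializer that distributes received IPv4 and IPv4/TCP traffic with RSS using the driver's default hash key. The enclosing rte_eth_conf structure is defined a little further down in this header, and all values shown are illustrative.

/* Illustrative port configuration: RSS over IPv4 and IPv4/TCP flows,
 * default (random) hash key, no jumbo frames, hardware IP/TCP/UDP
 * checksum verification enabled. */
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode        = ETH_RSS,
		.split_hdr_size = 0,
		.header_split   = 0,
		.hw_ip_checksum = 1,
		.hw_vlan_filter = 0,
		.jumbo_frame    = 0,
		.hw_strip_crc   = 0,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL, /* NULL: let the driver pick a key */
			.rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
		},
	},
};

Supplying rss_hf = 0 instead would disable RSS, and selecting ETH_VMDQ_DCB as the mq_mode would require filling the vmdq_dcb_conf member of the same union rather than rss_conf.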
+ */ +struct rte_eth_rxconf { + struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */ + uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */ +}; + +/** + * A structure used to configure a TX ring of an Ethernet port. + */ +struct rte_eth_txconf { + struct rte_eth_thresh tx_thresh; /**< TX ring threshold registers. */ + uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */ + uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */ +}; + +/** + * This enum indicates the flow control mode + */ +enum rte_eth_fc_mode { + RTE_FC_NONE = 0, /**< Disable flow control. */ + RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */ + RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */ + RTE_FC_FULL /**< Enable flow control on both side. */ +}; + +/** + * A structure used to configure Ethernet flow control parameter. + * These parameters will be configured into the register of the NIC. + * Please refer to the corresponding data sheet for proper value. + */ +struct rte_eth_fc_conf { + uint32_t high_water; /**< High threshold value to trigger XOFF */ + uint32_t low_water; /**< Low threshold value to trigger XON */ + uint16_t pause_time; /**< Pause quota in the Pause frame */ + uint16_t send_xon; /**< Is XON frame need be sent */ + enum rte_eth_fc_mode mode; /**< Link flow control mode */ +}; + +/** + * Flow Director setting modes: none (default), signature or perfect. + */ +enum rte_fdir_mode { + RTE_FDIR_MODE_NONE = 0, /**< Disable FDIR support. */ + RTE_FDIR_MODE_SIGNATURE, /**< Enable FDIR signature filter mode. */ + RTE_FDIR_MODE_PERFECT, /**< Enable FDIR perfect filter mode. */ +}; + +/** + * Memory space that can be configured to store Flow Director filters + * in the board memory. + */ +enum rte_fdir_pballoc_type { + RTE_FDIR_PBALLOC_64K = 0, /**< 64k. */ + RTE_FDIR_PBALLOC_128K, /**< 128k. */ + RTE_FDIR_PBALLOC_256K, /**< 256k. */ +}; + +/** + * Select report mode of FDIR hash information in RX descriptors. + */ +enum rte_fdir_status_mode { + RTE_FDIR_NO_REPORT_STATUS = 0, /**< Never report FDIR hash. */ + RTE_FDIR_REPORT_STATUS, /**< Only report FDIR hash for matching pkts. */ + RTE_FDIR_REPORT_STATUS_ALWAYS, /**< Always report FDIR hash. */ +}; + +/** + * A structure used to configure the Flow Director (FDIR) feature + * of an Ethernet port. + * + * If mode is RTE_FDIR_DISABLE, the pballoc value is ignored. + */ +struct rte_fdir_conf { + enum rte_fdir_mode mode; /**< Flow Director mode. */ + enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */ + enum rte_fdir_status_mode status; /**< How to report FDIR hash. */ + /* Offset of flexbytes field in RX packets (in 16-bit word units). */ + uint8_t flexbytes_offset; + /* RX queue of packets matching a "drop" filter in perfect mode. */ + uint8_t drop_queue; +}; + +/** + * Possible l4type of FDIR filters. + */ +enum rte_l4type { + RTE_FDIR_L4TYPE_NONE = 0, /**< Nnoe. */ + RTE_FDIR_L4TYPE_UDP, /**< UDP. */ + RTE_FDIR_L4TYPE_TCP, /**< TCP. */ + RTE_FDIR_L4TYPE_SCTP, /**< SCTP. */ +}; + +/** + * Select IPv4 or IPv6 FDIR filters. + */ +enum rte_iptype { + RTE_FDIR_IPTYPE_IPV4 = 0, /**< IPv4. */ + RTE_FDIR_IPTYPE_IPV6 , /**< IPv6. */ +}; + +/** + * A structure used to define a FDIR packet filter. + */ +struct rte_fdir_filter { + uint16_t flex_bytes; /**< Flex bytes value to match. */ + uint16_t vlan_id; /**< VLAN ID value to match, 0 otherwise. */ + uint16_t port_src; /**< Source port to match, 0 otherwise. 
*/ + uint16_t port_dst; /**< Destination port to match, 0 otherwise. */ + union { + uint32_t ipv4_addr; /**< IPv4 source address to match. */ + uint32_t ipv6_addr[4]; /**< IPv6 source address to match. */ + } ip_src; /**< IPv4/IPv6 source address to match (union of above). */ + union { + uint32_t ipv4_addr; /**< IPv4 destination address to match. */ + uint32_t ipv6_addr[4]; /**< IPv6 destination address to match */ + } ip_dst; /**< IPv4/IPv6 destination address to match (union of above). */ + enum rte_l4type l4type; /**< l4type to match: NONE/UDP/TCP/SCTP. */ + enum rte_iptype iptype; /**< IP packet type to match: IPv4 or IPv6. */ +}; + +/** + * A structure used to configure FDIR masks that are used by the device + * to match the various fields of RX packet headers. + */ +struct rte_fdir_masks { + /** When set to 1, packet l4type is not relevant in filters, and + source and destination port masks must be set to zero. */ + uint8_t only_ip_flow; + uint8_t vlan_id; /**< If set to 1, vlan_id is relevant in filters. */ + uint8_t vlan_prio; /**< If set to 1, vlan_prio is relevant in filters. */ + uint8_t flexbytes; /**< If set to 1, flexbytes is relevant in filters. */ + /** Mask of Destination IPv4 Address. All bits set to 1 define the + relevant bits to use in the destination address of an IPv4 packet + when matching it against FDIR filters. */ + uint32_t dst_ipv4_mask; + /** Mask of Source IPv4 Address. All bits set to 1 define + the relevant bits to use in the source address of an IPv4 packet + when matching it against FDIR filters. */ + uint32_t src_ipv4_mask; + /** Mask of Source IPv6 Address. All bits set to 1 define the + relevant BYTES to use in the source address of an IPv6 packet + when matching it against FDIR filters. */ + uint16_t src_ipv6_mask; + /** Mask of Source Port. All bits set to 1 define the relevant + bits to use in the source port of an IP packets when matching it + against FDIR filters. */ + uint16_t src_port_mask; + /** Mask of Destination Port. All bits set to 1 define the relevant + bits to use in the destination port of an IP packet when matching it + against FDIR filters. */ + uint16_t dst_port_mask; +}; + +/** + * A structure used to report the status of the flow director filters in use. + */ +struct rte_eth_fdir { + /** Number of filters with collision indication. */ + uint16_t collision; + /** Number of free (non programmed) filters. */ + uint16_t free; + /** The Lookup hash value of the added filter that updated the value + of the MAXLEN field */ + uint16_t maxhash; + /** Longest linked list of filters in the table. */ + uint8_t maxlen; + /** Number of added filters. */ + uint64_t add; + /** Number of removed filters. */ + uint64_t remove; + /** Number of failed added filters (no more space in device). */ + uint64_t f_add; + /** Number of failed removed filters. */ + uint64_t f_remove; +}; + +/** + * A structure used to enable/disable specific device interrupts. + */ +struct rte_intr_conf { + /** enable/disable lsc interrupt. 0 (default) - disable, 1 enable */ + uint16_t lsc; +}; + +/** + * A structure used to configure an Ethernet port. + * Depending upon the RX multi-queue mode, extra advanced + * configuration settings may be needed. + */ +struct rte_eth_conf { + uint16_t link_speed; + /**< ETH_LINK_SPEED_10[0|00|000], or 0 for autonegotation */ + uint16_t link_duplex; + /**< ETH_LINK_[HALF_DUPLEX|FULL_DUPLEX], or 0 for autonegotation */ + struct rte_eth_rxmode rxmode; /**< Port RX configuration. */ + struct rte_eth_txmode txmode; /**< Port TX configuration. 
*/ + union { + struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration */ + struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf; + /**< Port vmdq+dcb configuration. */ + } rx_adv_conf; /**< Port RX filtering configuration (union). */ + struct rte_fdir_conf fdir_conf; /**< FDIR configuration. */ + struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */ +}; + +/** + * A structure used to retrieve the contextual information of + * an Ethernet device, such as the controlling driver of the device, + * its PCI context, etc... + */ +struct rte_eth_dev_info { + struct rte_pci_device *pci_dev; /**< Device PCI information. */ + const char *driver_name; /**< Device Driver name. */ + uint32_t min_rx_bufsize; /**< Minimum size of RX buffer. */ + uint32_t max_rx_pktlen; /**< Maximum configurable length of RX pkt. */ + uint16_t max_rx_queues; /**< Maximum number of RX queues. */ + uint16_t max_tx_queues; /**< Maximum number of TX queues. */ + uint32_t max_mac_addrs; /**< Maximum number of MAC addresses. */ +}; + +struct rte_eth_dev; +struct igb_rx_queue; +struct igb_tx_queue; + +struct rte_eth_dev_callback; +/** @internal Structure to keep track of registered callbacks */ +TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback); + +/* + * Definitions of all functions exported by an Ethernet driver through the + * the generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev* + * structure associated with an Ethernet device. + */ + +typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev, uint16_t nb_rx_q, + uint16_t nb_tx_q); +/**< Ethernet device configuration. */ + +typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev); +/**< Function used to start a configured Ethernet device. */ + +typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev); +/**< Function used to stop a configured Ethernet device. */ + +typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev); +/**< @internal Function used to close a configured Ethernet device. */ + +typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev); +/**< Function used to enable the RX promiscuous mode of an Ethernet device. */ + +typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev); +/**< Function used to disable the RX promiscuous mode of an Ethernet device. */ + +typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev); +/**< Enable the receipt of all multicast packets by an Ethernet device. */ + +typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev); +/**< Disable the receipt of all multicast packets by an Ethernet device. */ + +typedef int (*eth_link_update_t)(struct rte_eth_dev *dev, + int wait_to_complete); +/**< Get link speed, duplex mode and state (up/down) of an Ethernet device. */ + +typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev, + struct rte_eth_stats *igb_stats); +/**< Get global I/O statistics of an Ethernet device. */ + +typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev); +/**< Reset global I/O statistics of an Ethernet device to 0. */ + +typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +/**< Get specific informations of an Ethernet device. */ + +typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev, + uint16_t rx_queue_id, + uint16_t nb_rx_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); +/**< Set up a receive queue of an Ethernet device. 
*/ + +typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev, + uint16_t tx_queue_id, + uint16_t nb_tx_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); +/**< Setup a transmit queue of an Ethernet device. */ + +typedef void (*vlan_filter_set_t)(struct rte_eth_dev *dev, + uint16_t vlan_id, + int on); +/**< Enable/Disable filtering of a VLAN Tag Identifier by an Ethernet device. */ + +typedef uint16_t (*eth_rx_burst_t)(struct igb_rx_queue *rxq, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +/**< Retrieve input packets from a receive queue of an Ethernet device. */ + +typedef uint16_t (*eth_tx_burst_t)(struct igb_tx_queue *txq, + struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +/**< Send output packets on a transmit queue of an Ethernet device. */ + +typedef int (*fdir_add_signature_filter_t)(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_ftr, + uint8_t rx_queue); +/**< Setup a new signature filter rule on an Ethernet device */ + +typedef int (*fdir_update_signature_filter_t)(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_ftr, + uint8_t rx_queue); +/**< Update a signature filter rule on an Ethernet device */ + +typedef int (*fdir_remove_signature_filter_t)(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_ftr); +/**< Remove a signature filter rule on an Ethernet device */ + +typedef void (*fdir_infos_get_t)(struct rte_eth_dev *dev, + struct rte_eth_fdir *fdir); +/**< Get information about fdir status */ + +typedef int (*fdir_add_perfect_filter_t)(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_ftr, + uint16_t soft_id, uint8_t rx_queue, + uint8_t drop); +/**< Setup a new perfect filter rule on an Ethernet device */ + +typedef int (*fdir_update_perfect_filter_t)(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_ftr, + uint16_t soft_id, uint8_t rx_queue, + uint8_t drop); +/**< Update a perfect filter rule on an Ethernet device */ + +typedef int (*fdir_remove_perfect_filter_t)(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_ftr, + uint16_t soft_id); +/**< Remove a perfect filter rule on an Ethernet device */ + +typedef int (*fdir_set_masks_t)(struct rte_eth_dev *dev, + struct rte_fdir_masks *fdir_masks); +/**< Setup flow director masks on an Ethernet device */ + +typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +/**< Setup flow control parameter on an Ethernet device */ + +typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev); +/**< Turn on SW controllable LED on an Ethernet device */ + +typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev); +/**< Turn off SW controllable LED on an Ethernet device */ + +typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index); +/**< Remove MAC address from receive address register */ + +typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev, + struct ether_addr *mac_addr, + uint32_t index, + uint32_t vmdq); +/**< Set a MAC address into Receive Address Address Register */ + +/** + * A structure containing the functions exported by an Ethernet driver. + */ +struct eth_dev_ops { + eth_dev_configure_t dev_configure; /**< Configure device. */ + eth_dev_start_t dev_start; /**< Start device. */ + eth_dev_stop_t dev_stop; /**< Stop device. */ + eth_dev_close_t dev_close; /**< Close device. */ + eth_promiscuous_enable_t promiscuous_enable; /**< Promiscuous ON. */ + eth_promiscuous_disable_t promiscuous_disable;/**< Promiscuous OFF. */ + eth_allmulticast_enable_t allmulticast_enable;/**< RX multicast ON. 
*/ + eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OF. */ + eth_link_update_t link_update; /**< Get device link state. */ + eth_stats_get_t stats_get; /**< Get device statistics. */ + eth_stats_reset_t stats_reset; /**< Reset device statistics. */ + eth_dev_infos_get_t dev_infos_get; /**< Get device info. */ + vlan_filter_set_t vlan_filter_set; /**< Filter VLAN on/off. */ + eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue.*/ + eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue.*/ + eth_dev_led_on_t dev_led_on; /**< Turn on LED. */ + eth_dev_led_off_t dev_led_off; /**< Turn off LED. */ + flow_ctrl_set_t flow_ctrl_set; /**< Setup flow control. */ + eth_mac_addr_remove_t mac_addr_remove; /**< Remove MAC address */ + eth_mac_addr_add_t mac_addr_add; /**< Add a MAC address */ + + /** Add a signature filter. */ + fdir_add_signature_filter_t fdir_add_signature_filter; + /** Update a signature filter. */ + fdir_update_signature_filter_t fdir_update_signature_filter; + /** Remove a signature filter. */ + fdir_remove_signature_filter_t fdir_remove_signature_filter; + /** Get information about FDIR status. */ + fdir_infos_get_t fdir_infos_get; + /** Add a perfect filter. */ + fdir_add_perfect_filter_t fdir_add_perfect_filter; + /** Update a perfect filter. */ + fdir_update_perfect_filter_t fdir_update_perfect_filter; + /** Remove a perfect filter. */ + fdir_remove_perfect_filter_t fdir_remove_perfect_filter; + /** Setup masks for FDIR filtering. */ + fdir_set_masks_t fdir_set_masks; +}; + +/** + * The generic data structure associated with each ethernet device. + * + * Pointers to burst-oriented packet receive and transmit functions are + * located at the beginning of the structure, along with the pointer to + * where all the data elements for the particular device are stored in shared + * memory. This split allows the function pointer and driver data to be per- + * process, while the actual configuration data for the device is shared. + */ +struct rte_eth_dev { + eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */ + eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */ + struct rte_eth_dev_data *data; /**< Pointer to device data */ + const struct eth_driver *driver;/**< Driver for this device */ + struct eth_dev_ops *dev_ops; /**< Functions exported by PMD */ + struct rte_pci_device *pci_dev; /**< PCI info. supplied by probing */ + struct rte_eth_dev_cb_list callbacks; /**< User application callbacks */ +}; + +/** + * The data part, with no function pointers, associated with each ethernet device. + * + * This structure is safe to place in shared memory to be common among different + * processes in a multi-process configuration. + */ +struct rte_eth_dev_data { + struct igb_rx_queue **rx_queues; /**< Array of pointers to RX queues. */ + struct igb_tx_queue **tx_queues; /**< Array of pointers to TX queues. */ + uint16_t nb_rx_queues; /**< Number of RX queues. */ + uint16_t nb_tx_queues; /**< Number of TX queues. */ + + void *dev_private; /**< PMD-specific private data */ + + struct rte_eth_link dev_link; + /**< Link-level information & status */ + + struct rte_eth_conf dev_conf; /**< Configuration applied to device. */ + uint16_t max_frame_size; /**< Default is ETHER_MAX_LEN (1518). */ + + uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */ + struct ether_addr* mac_addrs;/**< Device Ethernet Link address. */ + uint8_t port_id; /**< Device [external] port identifier. 
*/ + uint8_t promiscuous : 1, /**< RX promiscuous mode ON(1) / OFF(0). */ + scattered_rx : 1, /**< RX of scattered packets is ON(1) / OFF(0) */ + all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */ + dev_started : 1; /**< Device state: STARTED(1) / STOPPED(0). */ +}; + +/** + * The pool of *rte_eth_dev* structures. The size of the pool + * is configured at compile-time in the file. + */ +extern struct rte_eth_dev rte_eth_devices[]; + +/** + * Get the total number of Ethernet devices that have been successfully + * initialized by the [matching] Ethernet driver during the PCI probing phase. + * All devices whose port identifier is in the range + * [0, rte_eth_dev_count() - 1] can be operated on by network applications. + * + * @return + * - The total number of usable Ethernet devices. + */ +extern uint8_t rte_eth_dev_count(void); + +struct eth_driver; +/** + * Initialization function of an Ethernet driver invoked for each matching + * Ethernet PCI device detected during the PCI probing phase. + * + * @param eth_drv + * The pointer to the [matching] Ethernet driver structure supplied by + * the PMD when it registered itself. + * @param eth_dev + * The *eth_dev* pointer is the address of the *rte_eth_dev* structure + * associated with the matching device and which have been [automatically] + * allocated in the *rte_eth_devices* array. + * The *eth_dev* structure is supplied to the driver initialization function + * with the following fields already initialized: + * + * - *pci_dev*: Holds the pointers to the *rte_pci_device* structure which + * contains the generic PCI information of the matching device. + * + * - *dev_private*: Holds a pointer to the device private data structure. + * + * - *max_frame_size*: Contains the default Ethernet maximum frame length + * (1518). + * + * - *port_id*: Contains the port index of the device (actually the index + * of the *eth_dev* structure in the *rte_eth_devices* array). + * + * @return + * - 0: Success, the device is properly initialized by the driver. + * In particular, the driver MUST have set up the *dev_ops* pointer + * of the *eth_dev* structure. + * - <0: Error code of the device initialization failure. + */ +typedef int (*eth_dev_init_t)(struct eth_driver *eth_drv, + struct rte_eth_dev *eth_dev); + +/** + * The structure associated with a PMD Ethernet driver. + * + * Each Ethernet driver acts as a PCI driver and is represented by a generic + * *eth_driver* structure that holds: + * + * - An *rte_pci_driver* structure (which must be the first field). + * + * - The *eth_dev_init* function invoked for each matching PCI device. + * + * - The size of the private data to allocate for each matching device. + */ +struct eth_driver { + struct rte_pci_driver pci_drv; /**< The PMD is also a PCI driver. */ + eth_dev_init_t eth_dev_init; /**< Device init function. */ + unsigned int dev_private_size; /**< Size of device private data. */ +}; + +/** + * A function invoked by the initialization function of an Ethernet driver + * to simultaneously register itself as a PCI driver and as an Ethernet + * Poll Mode Driver (PMD). + * + * @param eth_drv + * The pointer to the *eth_driver* structure associated with + * the Ethernet driver. + */ +extern void rte_eth_driver_register(struct eth_driver *eth_drv); + +/** + * The initialization function of the driver for + * Intel(r) IGB Gigabit Ethernet Controller devices. + * This function is invoked once at EAL start time. 
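The application-side start-up sequence implied by these PMD init functions can be sketched as follows. rte_eal_init() and rte_eal_pci_probe() belong to the EAL and are not part of this hunk, so their use here is an assumption based on the EAL headers; error handling is trimmed for brevity.

#include <stdio.h>
#include <rte_eal.h>
#include <rte_pci.h>
#include <rte_ethdev.h>

int
main(int argc, char **argv)
{
	/* Initialize the EAL, register the PMDs of interest, then scan
	 * the PCI bus so matching ports get probed and numbered. */
	if (rte_eal_init(argc, argv) < 0)
		return -1;
	if (rte_igb_pmd_init() != 0 || rte_ixgbe_pmd_init() != 0)
		return -1;
	if (rte_eal_pci_probe() != 0)
		return -1;

	printf("%u port(s) available\n", (unsigned)rte_eth_dev_count());
	return 0;
}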
+ * @return + * 0 on success + */ +extern int rte_igb_pmd_init(void); + +/** + * The initialization function of the driver for 10Gbps Intel IXGBE + * Ethernet devices. + * Invoked once at EAL start time. + * @return + * 0 on success + */ +extern int rte_ixgbe_pmd_init(void); + +/** + * The initialization function of the driver for 10Gbps Intel IXGBE_VF + * Ethernet devices. + * Invoked once at EAL start time. + * @return + * 0 on success + */ +extern int rte_ixgbevf_pmd_init(void); + +/** + * Configure an Ethernet device. + * This function must be invoked first before any other function in the + * Ethernet API. This function can also be re-invoked when a device is in the + * stopped state. + * + * @param port_id + * The port identifier of the Ethernet device to configure. + * @param nb_rx_queue + * The number of receive queues to set up for the Ethernet device. + * @param nb_tx_queue + * The number of transmit queues to set up for the Ethernet device. + * @param eth_conf + * The pointer to the configuration data to be used for the Ethernet device. + * The *rte_eth_conf* structure includes: + * - the hardware offload features to activate, with dedicated fields for + * each statically configurable offload hardware feature provided by + * Ethernet devices, such as IP checksum or VLAN tag stripping for + * example. + * - the Receive Side Scaling (RSS) configuration when using multiple RX + * queues per port. + * + * Embedding all configuration information in a single data structure + * is the more flexible method that allows the addition of new features + * without changing the syntax of the API. + * @return + * - 0: Success, device configured. + * - <0: Error code returned by the driver configuration function. + */ +extern int rte_eth_dev_configure(uint8_t port_id, + uint16_t nb_rx_queue, + uint16_t nb_tx_queue, + const struct rte_eth_conf *eth_conf); + +/** + * Allocate and set up a receive queue for an Ethernet device. + * + * The function allocates a contiguous block of memory for *nb_rx_desc* + * receive descriptors from a memory zone associated with *socket_id* + * and initializes each receive descriptor with a network buffer allocated + * from the memory pool *mb_pool*. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param rx_queue_id + * The index of the receive queue to set up. + * The value must be in the range [0, nb_rx_queue - 1] previously supplied + * to rte_eth_dev_configure(). + * @param nb_rx_desc + * The number of receive descriptors to allocate for the receive ring. + * @param socket_id + * The *socket_id* argument is the socket identifier in case of NUMA. + * The value can be *SOCKET_ID_ANY* if there is no NUMA constraint for + * the DMA memory allocated for the receive descriptors of the ring. + * @param rx_conf + * The pointer to the configuration data to be used for the receive queue. + * The *rx_conf* structure contains an *rx_thresh* structure with the values + * of the Prefetch, Host, and Write-Back threshold registers of the receive + * ring. + * @param mb_pool + * The pointer to the memory pool from which to allocate *rte_mbuf* network + * memory buffers to populate each descriptor of the receive ring. + * @return + * - 0: Success, receive queue correctly set up. + * - -EINVAL: The size of network buffers which can be allocated from the + * memory pool does not fit the various buffer sizes allowed by the + * device controller. 
+ * - -ENOMEM: Unable to allocate the receive ring descriptors or to + * allocate network memory buffers from the memory pool when + * initializing receive descriptors. + */ +extern int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +/** + * Allocate and set up a transmit queue for an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param tx_queue_id + * The index of the transmit queue to set up. + * The value must be in the range [0, nb_tx_queue - 1] previously supplied + * to rte_eth_dev_configure(). + * @param nb_tx_desc + * The number of transmit descriptors to allocate for the transmit ring. + * @param socket_id + * The *socket_id* argument is the socket identifier in case of NUMA. + * Its value can be *SOCKET_ID_ANY* if there is no NUMA constraint for + * the DMA memory allocated for the transmit descriptors of the ring. + * @param tx_conf + * The pointer to the configuration data to be used for the transmit queue. + * The *tx_conf* structure contains the following data: + * - The *tx_thresh* structure with the values of the Prefetch, Host, and + * Write-Back threshold registers of the transmit ring. + * When setting Write-Back threshold to the value greater then zero, + * *tx_rs_thresh* value should be explicitly set to one. + * - The *tx_free_thresh* value indicates the [minimum] number of network + * buffers that must be pending in the transmit ring to trigger their + * [implicit] freeing by the driver transmit function. + * - The *tx_rs_thresh* value indicates the [minimum] number of transmit + * descriptors that must be pending in the transmit ring before setting the + * RS bit on a descriptor by the driver transmit function. + * The *tx_rs_thresh* value should be less or equal then + * *tx_free_thresh* value, and both of them should be less then + * *nb_tx_desc* - 3. + * + * Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces + * the transmit function to use default values. + * @return + * - 0: Success, the transmit queue is correctly set up. + * - -ENOMEM: Unable to allocate the transmit ring descriptors. + */ +extern int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +/** + * Start an Ethernet device. + * + * The device start step is the last one and consists of setting the configured + * offload features and in starting the transmit and the receive units of the + * device. + * On success, all basic functions exported by the Ethernet API (link status, + * receive/transmit, and so on) can be invoked. + * + * @param port_id + * The port identifier of the Ethernet device. + * @return + * - 0: Success, Ethernet device started. + * - <0: Error code of the driver device start function. + */ +extern int rte_eth_dev_start(uint8_t port_id); + +/** + * Stop an Ethernet device. The device can be restarted with a call to + * rte_eth_dev_start() + * + * @param port_id + * The port identifier of the Ethernet device. + */ +extern void rte_eth_dev_stop(uint8_t port_id); + +/** + * Close an Ethernet device. The device cannot be restarted! + * + * @param port_id + * The port identifier of the Ethernet device. + */ +extern void rte_eth_dev_close(uint8_t port_id); + +/** + * Enable receipt in promiscuous mode for an Ethernet device. 
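Putting the configure / queue-setup / start order together, a minimal single-queue bring-up for one port might look like the sketch below. The descriptor counts and threshold values are illustrative only (in practice they are device specific), and the mbuf pool and the rte_eth_conf initializer are assumed to be created elsewhere.

/* Minimal bring-up sketch following the order required by this API:
 * configure, set up the TX and RX queues, then start the device. */
static int
port_init(uint8_t port_id, struct rte_mempool *mb_pool,
	  const struct rte_eth_conf *port_conf)
{
	static const struct rte_eth_rxconf rx_conf = {
		.rx_thresh = { .pthresh = 8, .hthresh = 8, .wthresh = 4 },
	};
	static const struct rte_eth_txconf tx_conf = {
		.tx_thresh = { .pthresh = 36, .hthresh = 0, .wthresh = 0 },
		.tx_free_thresh = 0, /* 0: let the driver pick its default */
		.tx_rs_thresh = 0,
	};
	int ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, port_conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512, SOCKET_ID_ANY,
				     &tx_conf);
	if (ret < 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 128, SOCKET_ID_ANY,
				     &rx_conf, mb_pool);
	if (ret < 0)
		return ret;

	ret = rte_eth_dev_start(port_id);
	if (ret < 0)
		return ret;

	rte_eth_promiscuous_enable(port_id);
	return 0;
}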
+ * + * @param port_id + * The port identifier of the Ethernet device. + */ +extern void rte_eth_promiscuous_enable(uint8_t port_id); + +/** + * Disable receipt in promiscuous mode for an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + */ +extern void rte_eth_promiscuous_disable(uint8_t port_id); + +/** + * Return the value of promiscuous mode for an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @return + * - (1) if promiscuous is enabled + * - (0) if promiscuous is disabled. + * - (-1) on error + */ +extern int rte_eth_promiscuous_get(uint8_t port_id); + +/** + * Enable the receipt of any multicast frame by an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + */ +extern void rte_eth_allmulticast_enable(uint8_t port_id); + +/** + * Disable the receipt of all multicast frames by an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + */ +extern void rte_eth_allmulticast_disable(uint8_t port_id); + +/** + * Return the value of allmulticast mode for an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @return + * - (1) if allmulticast is enabled + * - (0) if allmulticast is disabled. + * - (-1) on error + */ +extern int rte_eth_allmulticast_get(uint8_t port_id); + +/** + * Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX + * or FULL-DUPLEX) of the physical link of an Ethernet device. It might need + * to wait up to 9 seconds in it. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param link + * A pointer to an *rte_eth_link* structure to be filled with + * the status, the speed and the mode of the Ethernet device link. + */ +extern void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link); + +/** + * Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX + * or FULL-DUPLEX) of the physical link of an Ethernet device. It is a no-wait + * version of rte_eth_link_get(). + * + * @param port_id + * The port identifier of the Ethernet device. + * @param link + * A pointer to an *rte_eth_link* structure to be filled with + * the status, the speed and the mode of the Ethernet device link. + */ +extern void rte_eth_link_get_nowait(uint8_t port_id, + struct rte_eth_link *link); + +/** + * Retrieve the general I/O statistics of an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param stats + * A pointer to a structure of type *rte_eth_stats* to be filled with + * the values of device counters for the following set of statistics: + * - *ipackets* with the total of successfully received packets. + * - *opackets* with the total of successfully transmitted packets. + * - *ibytes* with the total of successfully received bytes. + * - *obytes* with the total of successfully transmitted bytes. + * - *ierrors* with the total of erroneous received packets. + * - *oerrors* with the total of failed transmitted packets. + */ +extern void rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats); + +/** + * Reset the general I/O statistics of an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + */ +extern void rte_eth_stats_reset(uint8_t port_id); + +/** + * Retrieve the Ethernet address of an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. 
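A small status-poll helper built on the link and statistics calls above could look like this sketch; the output format is arbitrary, and the fields used (link_status, link_speed, link_duplex, ipackets, ierrors, opackets, oerrors) are those defined earlier in this header.

#include <stdio.h>

static void
print_port_status(uint8_t port_id)
{
	struct rte_eth_link link;
	struct rte_eth_stats stats;

	/* Non-blocking link query plus the basic I/O counters. */
	rte_eth_link_get_nowait(port_id, &link);
	rte_eth_stats_get(port_id, &stats);

	if (link.link_status)
		printf("port %u: link up, %u Mbps, %s-duplex\n",
		       port_id, (unsigned)link.link_speed,
		       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full" : "half");
	else
		printf("port %u: link down\n", port_id);

	printf("  rx %llu pkts (%llu errors), tx %llu pkts (%llu errors)\n",
	       (unsigned long long)stats.ipackets,
	       (unsigned long long)stats.ierrors,
	       (unsigned long long)stats.opackets,
	       (unsigned long long)stats.oerrors);
}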
+ * @param mac_addr + * A pointer to a structure of type *ether_addr* to be filled with + * the Ethernet address of the Ethernet device. + */ +extern void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr); + +/** + * Retrieve the contextual information of an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param dev_info + * A pointer to a structure of type *rte_eth_dev_info* to be filled with + * the contextual information of the Ethernet device. + */ +extern void rte_eth_dev_info_get(uint8_t port_id, + struct rte_eth_dev_info *dev_info); + +/** + * Enable/Disable hardware filtering by an Ethernet device of received + * VLAN packets tagged with a given VLAN Tag Identifier. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param vlan_id + * The VLAN Tag Identifier whose filtering must be enabled or disabled. + * @param on + * If > 0, enable VLAN filtering of VLAN packets tagged with *vlan_id*. + * Otherwise, disable VLAN filtering of VLAN packets tagged with *vlan_id*. + * @return + * - (0) if successful. + * - (-ENOSUP) if hardware-assisted VLAN filtering not configured. + * - (-ENODEV) if *port_id* invalid. + * - (-ENOSYS) if VLAN filtering on *port_id* disabled. + * - (-EINVAL) if *vlan_id* > 4095. + */ +extern int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on); + +/** + * + * Retrieve a burst of input packets from a receive queue of an Ethernet + * device. The retrieved packets are stored in *rte_mbuf* structures whose + * pointers are supplied in the *rx_pkts* array. + * + * The rte_eth_rx_burst() function loops, parsing the RX ring of the + * receive queue, up to *nb_pkts* packets, and for each completed RX + * descriptor in the ring, it performs the following operations: + * + * - Initialize the *rte_mbuf* data structure associated with the + * RX descriptor according to the information provided by the NIC into + * that RX descriptor. + * + * - Store the *rte_mbuf* data structure into the next entry of the + * *rx_pkts* array. + * + * - Replenish the RX descriptor with a new *rte_mbuf* buffer + * allocated from the memory pool associated with the receive queue at + * initialization time. + * + * When retrieving an input packet that was scattered by the controller + * into multiple receive descriptors, the rte_eth_rx_burst() function + * appends the associated *rte_mbuf* buffers to the first buffer of the + * packet. + * + * The rte_eth_rx_burst() function returns the number of packets + * actually retrieved, which is the number of *rte_mbuf* data structures + * effectively supplied into the *rx_pkts* array. + * A return value equal to *nb_pkts* indicates that the RX queue contained + * at least *rx_pkts* packets, and this is likely to signify that other + * received packets remain in the input queue. Applications implementing + * a "retrieve as much received packets as possible" policy can check this + * specific case and keep invoking the rte_eth_rx_burst() function until + * a value less than *nb_pkts* is returned. + * + * This receive method has the following advantages: + * + * - It allows a run-to-completion network stack engine to retrieve and + * to immediately process received packets in a fast burst-oriented + * approach, avoiding the overhead of unnecessary intermediate packet + * queue/dequeue operations. 
+ * + * - Conversely, it also allows an asynchronous-oriented processing + * method to retrieve bursts of received packets and to immediately + * queue them for further parallel processing by another logical core, + * for instance. However, instead of having received packets being + * individually queued by the driver, this approach allows the invoker + * of the rte_eth_rx_burst() function to queue a burst of retrieved + * packets at a time and therefore dramatically reduce the cost of + * enqueue/dequeue operations per packet. + * + * - It allows the rte_eth_rx_burst() function of the driver to take + * advantage of burst-oriented hardware features (CPU cache, + * prefetch instructions, and so on) to minimize the number of CPU + * cycles per packet. + * + * To summarize, the proposed receive API enables many + * burst-oriented optimizations in both synchronous and asynchronous + * packet processing environments with no overhead in both cases. + * + * The rte_eth_rx_burst() function does not provide any error + * notification to avoid the corresponding overhead. As a hint, the + * upper-level application might check the status of the device link once + * being systematically returned a 0 value for a given number of tries. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param queue_id + * The index of the receive queue from which to retrieve input packets. + * The value must be in the range [0, nb_rx_queue - 1] previously supplied + * to rte_eth_dev_configure(). + * @param rx_pkts + * The address of an array of pointers to *rte_mbuf* structures that + * must be large enough to store *nb_pkts* pointers in it. + * @param nb_pkts + * The maximum number of packets to retrieve. + * @return + * The number of packets actually retrieved, which is the number + * of pointers to *rte_mbuf* structures effectively supplied to the + * *rx_pkts* array. + */ +#ifdef RTE_LIBRTE_ETHDEV_DEBUG +extern uint16_t rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); +#else +static inline uint16_t +rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct rte_eth_dev *dev; + + dev = &rte_eth_devices[port_id]; + return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id], rx_pkts, nb_pkts); +} +#endif + +/** + * Send a burst of output packets on a transmit queue of an Ethernet device. + * + * The rte_eth_tx_burst() function is invoked to transmit output packets + * on the output queue *queue_id* of the Ethernet device designated by its + * *port_id*. + * The *nb_pkts* parameter is the number of packets to send which are + * supplied in the *tx_pkts* array of *rte_mbuf* structures. + * The rte_eth_tx_burst() function loops, sending *nb_pkts* packets, + * up to the number of transmit descriptors available in the TX ring of the + * transmit queue. + * For each packet to send, the rte_eth_tx_burst() function performs + * the following operations: + * + * - Pick up the next available descriptor in the transmit ring. + * + * - Free the network buffer previously sent with that descriptor, if any. + * + * - Initialize the transmit descriptor with the information provided + * in the *rte_mbuf data structure. + * + * In the case of a segmented packet composed of a list of *rte_mbuf* buffers, + * the rte_eth_tx_burst() function uses several transmit descriptors + * of the ring. + * + * The rte_eth_tx_burst() function returns the number of packets it + * actually sent. 
A return value equal to *nb_pkts* means that all packets + * have been sent, and this is likely to signify that other output packets + * could be immediately transmitted again. Applications that implement a + * "send as many packets to transmit as possible" policy can check this + * specific case and keep invoking the rte_eth_tx_burst() function until + * a value less than *nb_pkts* is returned. + * + * It is the responsibility of the rte_eth_tx_burst() function to + * transparently free the memory buffers of packets previously sent. + * This feature is driven by the *tx_free_thresh* value supplied to the + * rte_eth_dev_configure() function at device configuration time. + * When the number of previously sent packets reached the "minimum transmit + * packets to free" threshold, the rte_eth_tx_burst() function must + * [attempt to] free the *rte_mbuf* buffers of those packets whose + * transmission was effectively completed. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param queue_id + * The index of the transmit queue through which output packets must be + * sent. + * The value must be in the range [0, nb_tx_queue - 1] previously supplied + * to rte_eth_dev_configure(). + * @param tx_pkts + * The address of an array of *nb_pkts* pointers to *rte_mbuf* structures + * which contain the output packets. + * @param nb_pkts + * The maximum number of packets to transmit. + * @return + * The number of output packets actually stored in transmit descriptors of + * the transmit ring. The return value can be less than the value of the + * *tx_pkts* parameter when the transmit ring is full or has been filled up. + */ +#ifdef RTE_LIBRTE_ETHDEV_DEBUG +extern uint16_t rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +#else +static inline uint16_t +rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct rte_eth_dev *dev; + + dev = &rte_eth_devices[port_id]; + return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id], tx_pkts, nb_pkts); +} +#endif + +/** + * Setup a new signature filter rule on an Ethernet device + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fdir_filter + * The pointer to the fdir filter structure describing the signature filter + * rule. + * The *rte_fdir_filter* structure includes the values of the different fields + * to match: source and destination IP addresses, vlan id, flexbytes, source + * and destination ports, and so on. + * @param rx_queue + * The index of the RX queue where to store RX packets matching the added + * signature filter defined in fdir_filter. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support flow director mode. + * - (-ENODEV) if *port_id* invalid. + * - (-ENOSYS) if the FDIR mode is not configured in signature mode + * on *port_id*. + * - (-EINVAL) if the fdir_filter information is not correct. + */ +int rte_eth_dev_fdir_add_signature_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter, + uint8_t rx_queue); + +/** + * Update a signature filter rule on an Ethernet device. + * If the rule doesn't exits, it is created. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fdir_ftr + * The pointer to the structure describing the signature filter rule. 
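As an illustration of the two burst functions above, here is a minimal forwarding-loop sketch. It assumes a single RX/TX queue per port and that rte_pktmbuf_free() from rte_mbuf.h (not shown in this file) is available to drop the packets the TX ring could not accept:

    #define BURST_SIZE 32

    static void
    forward_loop(uint8_t rx_port, uint8_t tx_port)
    {
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t nb_rx, nb_tx;

        for (;;) {
            /* keep draining the RX queue; a short return means it is empty */
            nb_rx = rte_eth_rx_burst(rx_port, 0, pkts, BURST_SIZE);
            if (nb_rx == 0)
                continue;

            nb_tx = rte_eth_tx_burst(tx_port, 0, pkts, nb_rx);

            /* mbufs not taken by the TX ring remain ours and must be freed */
            while (nb_tx < nb_rx)
                rte_pktmbuf_free(pkts[nb_tx++]);
        }
    }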
+ * The *rte_fdir_filter* structure includes the values of the different fields + * to match: source and destination IP addresses, vlan id, flexbytes, source + * and destination ports, and so on. + * @param rx_queue + * The index of the RX queue where to store RX packets matching the added + * signature filter defined in fdir_ftr. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support flow director mode. + * - (-ENODEV) if *port_id* invalid. + * - (-ENOSYS) if the flow director mode is not configured in signature mode + * on *port_id*. + * - (-EINVAL) if the fdir_filter information is not correct. + */ +int rte_eth_dev_fdir_update_signature_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_ftr, + uint8_t rx_queue); + +/** + * Remove a signature filter rule on an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fdir_ftr + * The pointer to the structure describing the signature filter rule. + * The *rte_fdir_filter* structure includes the values of the different fields + * to match: source and destination IP addresses, vlan id, flexbytes, source + * and destination ports, and so on. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support flow director mode. + * - (-ENODEV) if *port_id* invalid. + * - (-ENOSYS) if the flow director mode is not configured in signature mode + * on *port_id*. + * - (-EINVAL) if the fdir_filter information is not correct. + */ +int rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_ftr); + +/** + * Retrieve the flow director information of an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fdir + * A pointer to a structure of type *rte_eth_dev_fdir* to be filled with + * the flow director information of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support flow director mode. + * - (-ENODEV) if *port_id* invalid. + * - (-ENOSYS) if the flow director mode is not configured on *port_id*. + */ +int rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir); + +/** + * Add a new perfect filter rule on an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fdir_filter + * The pointer to the structure describing the perfect filter rule. + * The *rte_fdir_filter* structure includes the values of the different fields + * to match: source and destination IP addresses, vlan id, flexbytes, source + * and destination ports, and so on. + * IPv6 are not supported. + * @param soft_id + * The 16-bit value supplied in the field hash.fdir.id of mbuf for RX + * packets matching the perfect filter. + * @param rx_queue + * The index of the RX queue where to store RX packets matching the added + * perfect filter defined in fdir_filter. + * @param drop + * If drop is set to 1, matching RX packets are stored into the RX drop + * queue defined in the rte_fdir_conf. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support flow director mode. + * - (-ENODEV) if *port_id* invalid. + * - (-ENOSYS) if the flow director mode is not configured in perfect mode + * on *port_id*. + * - (-EINVAL) if the fdir_filter information is not correct. + */ +int rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter, + uint16_t soft_id, uint8_t rx_queue, + uint8_t drop); + +/** + * Update a perfect filter rule on an Ethernet device. 
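A hedged sketch of adding a signature filter follows. The individual match fields of *rte_fdir_filter* (addresses, ports, VLAN id, flexbytes) are not reproduced in this excerpt, so they are only indicated by a comment; <string.h> is assumed for memset():

    #include <string.h>

    static int
    add_example_signature_filter(uint8_t port_id)
    {
        struct rte_fdir_filter filter;

        memset(&filter, 0, sizeof(filter));
        /*
         * Fill in the fields to match here (e.g. the destination IPv4
         * address and L4 ports) according to the rte_fdir_filter
         * definition in this header.
         */

        /* steer matching packets to RX queue 1 */
        return rte_eth_dev_fdir_add_signature_filter(port_id, &filter, 1);
    }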
+ * If the rule doesn't exits, it is created. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fdir_filter + * The pointer to the structure describing the perfect filter rule. + * The *rte_fdir_filter* structure includes the values of the different fields + * to match: source and destination IP addresses, vlan id, flexbytes, source + * and destination ports, and so on. + * IPv6 are not supported. + * @param soft_id + * The 16-bit value supplied in the field hash.fdir.id of mbuf for RX + * packets matching the perfect filter. + * @param rx_queue + * The index of the RX queue where to store RX packets matching the added + * perfect filter defined in fdir_filter. + * @param drop + * If drop is set to 1, matching RX packets are stored into the RX drop + * queue defined in the rte_fdir_conf. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support flow director mode. + * - (-ENODEV) if *port_id* invalid. + * - (-ENOSYS) if the flow director mode is not configured in perfect mode + * on *port_id*. + * - (-EINVAL) if the fdir_filter information is not correct. + */ +int rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter, + uint16_t soft_id, uint8_t rx_queue, + uint8_t drop); + +/** + * Remove a perfect filter rule on an Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fdir_filter + * The pointer to the structure describing the perfect filter rule. + * The *rte_fdir_filter* structure includes the values of the different fields + * to match: source and destination IP addresses, vlan id, flexbytes, source + * and destination ports, and so on. + * IPv6 are not supported. + * @param soft_id + * The soft_id value provided when adding/updating the removed filter. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support flow director mode. + * - (-ENODEV) if *port_id* invalid. + * - (-ENOSYS) if the flow director mode is not configured in perfect mode + * on *port_id*. + * - (-EINVAL) if the fdir_filter information is not correct. + */ +int rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id, + struct rte_fdir_filter *fdir_filter, + uint16_t soft_id); +/** + * Configure globally the masks for flow director mode for an Ethernet device. + * For example, the device can match packets with only the first 24 bits of + * the IPv4 source address. + * + * The following fields can be masked: IPv4 addresses and L4 port numbers. + * The following fields can be either enabled or disabled completely for the + * matching functionality: VLAN ID tag; VLAN Priority + CFI bit; Flexible 2-byte + * tuple. + * IPv6 masks are not supported. + * + * All filters must comply with the masks previously configured. + * For example, with a mask equal to 255.255.255.0 for the source IPv4 address, + * all IPv4 filters must be created with a source IPv4 address that fits the + * "X.X.X.0" format. + * + * This function flushes all filters that have been previously added in + * the device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fdir_mask + * The pointer to the fdir mask structure describing relevant headers fields + * and relevant bits to use when matching packets addresses and ports. + * IPv6 masks are not supported. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support flow director mode. + * - (-ENODEV) if *port_id* invalid. 
+ * - (-ENOSYS) if the flow director mode is not configured in perfect + * mode on *port_id*. + * - (-EINVAL) if the fdir_filter information is not correct + */ +int rte_eth_dev_fdir_set_masks(uint8_t port_id, + struct rte_fdir_masks *fdir_mask); + +/** + * The eth device event type for interrupt, and maybe others in the future. + */ +enum rte_eth_event_type { + RTE_ETH_EVENT_UNKNOWN, /**< unknown event type */ + RTE_ETH_EVENT_INTR_LSC, /**< lsc interrupt event */ + RTE_ETH_EVENT_MAX /**< max value of this enum */ +}; + +typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \ + enum rte_eth_event_type event, void *cb_arg); +/**< user application callback to be registered for interrupts */ + + + +/** + * Register a callback function for specific port id. + * + * @param port_id + * Port id. + * @param event + * Event interested. + * @param cb_fn + * User supplied callback function to be called. + * @param cb_arg + * Pointer to the parameters for the registered callback. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int rte_eth_dev_callback_register(uint8_t port_id, + enum rte_eth_event_type event, + rte_eth_dev_cb_fn cb_fn, void *cb_arg); + +/** + * Unregister a callback function for specific port id. + * + * @param port_id + * Port id. + * @param event + * Event interested. + * @param cb_fn + * User supplied callback function to be called. + * @param cb_arg + * Pointer to the parameters for the registered callback. -1 means to + * remove all for the same callback address and same event. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +int rte_eth_dev_callback_unregister(uint8_t port_id, + enum rte_eth_event_type event, + rte_eth_dev_cb_fn cb_fn, void *cb_arg); + +/** + * @internal Executes all the user application registered callbacks for + * the specific device. It is for DPDK internal user only. User + * application should not call it directly. + * + * @param dev + * Pointer to struct rte_eth_dev. + * @param event + * Eth device interrupt event type. + * + * @return + * void + */ +void _rte_eth_dev_callback_process(struct rte_eth_dev *dev, + enum rte_eth_event_type event); + +/** + * Turn on the LED on the Ethernet device. + * This function turns on the LED on the Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if underlying hardware OR driver doesn't support + * that operation. + * - (-ENODEV) if *port_id* invalid. + */ +int rte_eth_led_on(uint8_t port_id); + +/** + * Turn off the LED on the Ethernet device. + * This function turns off the LED on the Ethernet device. + * + * @param port_id + * The port identifier of the Ethernet device. + * @return + * - (0) if successful. + * - (-ENOTSUP) if underlying hardware OR driver doesn't support + * that operation. + * - (-ENODEV) if *port_id* invalid. + */ +int rte_eth_led_off(uint8_t port_id); + +/** + * Configure the Ethernet link flow control for Ethernet device + * + * @param port_id + * The port identifier of the Ethernet device. + * @param fc_conf + * The pointer to the structure of the flow control parameters. + * @return + * - (0) if successful. + * - (-ENOTSUP) if hardware doesn't support flow director mode. + * - (-ENODEV) if *port_id* invalid. 
+ * - (-EINVAL) if bad parameter + * - (-EIO) if flow control setup failure + */ +int rte_eth_dev_flow_ctrl_set(uint8_t port_id, + struct rte_eth_fc_conf *fc_conf); + +/** + * Add a MAC address to an internal array of addresses used to enable whitelist + * filtering to accept packets only if the destination MAC address matches. + * + * @param port + * The port identifier of the Ethernet device. + * @param mac_addr + * The MAC address to add. + * @param pool + * VMDq pool index to associate address with (if VMDq is enabled). If VMDq is + * not enabled, this should be set to 0. + * @return + * - (0) if successfully added or *mac_addr" was already added. + * - (-ENOTSUP) if hardware doesn't support this feature. + * - (-ENODEV) if *port* is invalid. + * - (-ENOSPC) if no more MAC addresses can be added. + * - (-EINVAL) if MAC address is invalid. + */ +int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr, + uint32_t pool); + +/** + * Remove a MAC address from the internal array of addresses. + * + * @param port + * The port identifier of the Ethernet device. + * @param mac_addr + * MAC address to remove. + * @return + * - (0) if successful, or *mac_addr* didn't exist. + * - (-ENOTSUP) if hardware doesn't support. + * - (-ENODEV) if *port* invalid. + * - (-EADDRINUSE) if attempting to remove the default MAC address + */ +int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr); + + +/*-------------------------- Deprecated definitions --------------------------*/ + +/* Needed to stop deprecation warnings becoming errors with GCC. */ +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic warning "-Wdeprecated-declarations" +#endif + +#ifdef RTE_LIBRTE_82576_PMD +#pragma message "\nWARNING: CONFIG_RTE_LIBRTE_82576_PMD is deprecated. " \ +"CONFIG_RTE_LIBRTE_IGB_PMD must be set in the config file to use Intel(R) " \ +"DPDK supported Gigabit Ethernet Controllers." +#endif + +#ifdef RTE_LIBRTE_IGB_PMD +/** + * @deprecated The config file option CONFIG_RTE_LIBRTE_82576_PMD and resulting + * preprocessor define RTE_LIBRTE_82576_PMD are deprecated. + * CONFIG_RTE_LIBRTE_IGB_PMD must be set in the config file to use Intel(R) DPDK + * supported Gigabit Ethernet Controllers, and RTE_LIBRTE_IGB_PMD should be used + * in code. + */ +#define RTE_LIBRTE_82576_PMD 1 +#endif + +/** + * @deprecated rte_82576_pmd_init() is deprecated and will be removed from + * future versions of Intel(R) DPDK. It has been replaced by rte_igb_pmd_init(). + * + * @return + * 0 on success + */ +static inline int __attribute__((deprecated)) +rte_82576_pmd_init(void) { + RTE_LOG(WARNING, PMD, "rte_82576_pmd_init() is deprecated and will be " + "removed from future version of Intel(R) DPDK. It has " + "been replaced by rte_igb_pmd_init()"); + return rte_igb_pmd_init(); +} + + +#ifdef RTE_LIBRTE_82599_PMD +#pragma message "\nWARNING: CONFIG_RTE_LIBRTE_82599_PMD is deprecated. " \ +"CONFIG_RTE_LIBRTE_IXGBE_PMD must be set in the config file to use Intel(R) " \ +"DPDK supported 10 Gigabit Ethernet Controllers." +#endif + +#ifdef RTE_LIBRTE_IXGBE_PMD +/** + * @deprecated The config file option CONFIG_RTE_LIBRTE_82599_PMD and resulting + * preprocessor define RTE_LIBRTE_82599_PMD are deprecated. + * CONFIG_RTE_LIBRTE_IXGBE_PMD must be set in the config file to use Intel(R) + * DPDK supported Gigabit Ethernet Controllers, and RTE_LIBRTE_IXGBE_PMD should + * be used in code. 
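To show the callback API defined above in use, here is an illustrative sketch that registers a handler for link-status-change (LSC) interrupts. It assumes the LSC interrupt was enabled when the device was configured and reuses rte_eth_link_get_nowait() from earlier in this header; <stdio.h> is assumed:

    #include <stdio.h>

    static void
    lsc_event_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
    {
        struct rte_eth_link link;

        (void)cb_arg;
        if (event != RTE_ETH_EVENT_INTR_LSC)
            return;
        rte_eth_link_get_nowait(port_id, &link);
        printf("port %u: link is %s\n", port_id,
               link.link_status ? "up" : "down");
    }

    /* during initialisation, before starting the port: */
    /* rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC, */
    /*                               lsc_event_cb, NULL);             */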
+ */ +#define RTE_LIBRTE_82599_PMD 1 +#endif + +/** + * @deprecated rte_82599_pmd_init() is deprecated and will be removed from + * future versions of Intel(R) DPDK. It has been replaced by + * rte_ixgbe_pmd_init(). + * + * @return + * 0 on success + */ +static inline int __attribute__((deprecated)) +rte_82599_pmd_init(void) { + RTE_LOG(WARNING, PMD, "rte_82599_pmd_init() is deprecated and will be " + "removed from future version of Intel(R) DPDK. It has " + "been replaced by rte_ixgbe_pmd_init()"); + return rte_ixgbe_pmd_init(); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_ETHDEV_H_ */ diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h new file mode 100644 index 0000000000..27cadaf29c --- /dev/null +++ b/lib/librte_ether/rte_ether.h @@ -0,0 +1,256 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_ETHER_H_ +#define _RTE_ETHER_H_ + +/** + * @file + * + * Ethernet Helpers in RTE + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#define ETHER_ADDR_LEN 6 /**< Length of Ethernet address. */ +#define ETHER_TYPE_LEN 2 /**< Length of Ethernet type field. */ +#define ETHER_CRC_LEN 4 /**< Length of Ethernet CRC. */ +#define ETHER_HDR_LEN \ + (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) /**< Length of Ethernet header. */ +#define ETHER_MIN_LEN 64 /**< Minimum frame len, including CRC. */ +#define ETHER_MAX_LEN 1518 /**< Maximum frame len, including CRC. */ +#define ETHER_MTU \ + (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN) /**< Ethernet MTU. */ + +#define ETHER_MAX_VLAN_FRAME_LEN \ + (ETHER_MAX_LEN + 4) /**< Maximum VLAN frame length, including CRC. */ + +#define ETHER_MAX_JUMBO_FRAME_LEN \ + 0x3F00 /**< Maximum Jumbo frame length, including CRC. */ + +/** + * Ethernet address: + * A universally administered address is uniquely assigned to a device by its + * manufacturer. 
The first three octets (in transmission order) contain the + * Organizationally Unique Identifier (OUI). The following three (MAC-48 and + * EUI-48) octets are assigned by that organization with the only constraint + * of uniqueness. + * A locally administered address is assigned to a device by a network + * administrator and does not contain OUIs. + * See http://standards.ieee.org/regauth/groupmac/tutorial.html + */ +struct ether_addr { + uint8_t addr_bytes[ETHER_ADDR_LEN]; /**< Address bytes in transmission order */ +} __attribute__((__packed__)); + +#define ETHER_LOCAL_ADMIN_ADDR 0x02 /**< Locally assigned Eth. address. */ +#define ETHER_GROUP_ADDR 0x01 /**< Multicast or broadcast Eth. address. */ + +/** + * Check if an Ethernet address is filled with zeros. + * + * @param ea + * A pointer to a ether_addr structure containing the ethernet address + * to check. + * @return + * True (1) if the given ethernet address is filled with zeros; + * false (0) otherwise. + */ +static inline int is_zero_ether_addr(const struct ether_addr *ea) +{ + int i; + for (i = 0; i < ETHER_ADDR_LEN; i++) + if (ea->addr_bytes[i] != 0x00) + return 0; + return 1; +} + +/** + * Check if an Ethernet address is a unicast address. + * + * @param ea + * A pointer to a ether_addr structure containing the ethernet address + * to check. + * @return + * True (1) if the given ethernet address is a unicast address; + * false (0) otherwise. + */ +static inline int is_unicast_ether_addr(const struct ether_addr *ea) +{ + return ((ea->addr_bytes[0] & ETHER_GROUP_ADDR) == 0); +} + +/** + * Check if an Ethernet address is a multicast address. + * + * @param ea + * A pointer to a ether_addr structure containing the ethernet address + * to check. + * @return + * True (1) if the given ethernet address is a multicast address; + * false (0) otherwise. + */ +static inline int is_multicast_ether_addr(const struct ether_addr *ea) +{ + return (ea->addr_bytes[0] & ETHER_GROUP_ADDR); +} + +/** + * Check if an Ethernet address is a broadcast address. + * + * @param ea + * A pointer to a ether_addr structure containing the ethernet address + * to check. + * @return + * True (1) if the given ethernet address is a broadcast address; + * false (0) otherwise. + */ +static inline int is_broadcast_ether_addr(const struct ether_addr *ea) +{ + const uint16_t *ea_words = (const uint16_t *)ea; + + return (ea_words[0] == 0xFFFF && ea_words[1] == 0xFFFF && + ea_words[2] == 0xFFFF); +} + +/** + * Check if an Ethernet address is a universally assigned address. + * + * @param ea + * A pointer to a ether_addr structure containing the ethernet address + * to check. + * @return + * True (1) if the given ethernet address is a universally assigned address; + * false (0) otherwise. + */ +static inline int is_universal_ether_addr(const struct ether_addr *ea) +{ + return ((ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) == 0); +} + +/** + * Check if an Ethernet address is a locally assigned address. + * + * @param ea + * A pointer to a ether_addr structure containing the ethernet address + * to check. + * @return + * True (1) if the given ethernet address is a locally assigned address; + * false (0) otherwise. + */ +static inline int is_local_admin_ether_addr(const struct ether_addr *ea) +{ + return ((ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) == 1); +} + +/** + * Check if an Ethernet address is a valid address. Checks that the address is a + * unicast address and is not filled with zeros. 
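A small classifier built from the predicates above, as an illustrative sketch. Note that is_local_admin_ether_addr() as written compares the masked value against 1, yet (addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) can only be 0 or 2, so that helper always returns false; the sketch therefore derives the answer from is_universal_ether_addr() instead:

    static const char *
    ether_addr_kind(const struct ether_addr *ea)
    {
        if (is_zero_ether_addr(ea))
            return "zero";
        if (is_broadcast_ether_addr(ea))
            return "broadcast";
        if (is_multicast_ether_addr(ea))
            return "multicast (group)";
        return is_universal_ether_addr(ea) ?
               "universally administered unicast" :
               "locally administered unicast";
    }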
+ * + * @param ea + * A pointer to a ether_addr structure containing the ethernet address + * to check. + * @return + * True (1) if the given ethernet address is valid; + * false (0) otherwise. + */ +static inline int is_valid_assigned_ether_addr(const struct ether_addr *ea) +{ + return (is_unicast_ether_addr(ea) && (! is_zero_ether_addr(ea))); +} + +/** + * Fast copy an Ethernet address. + * + * @param ea_from + * A pointer to a ether_addr structure holding the Ethernet address to copy. + * @param ea_to + * A pointer to a ether_addr structure where to copy the Ethernet address. + */ +static inline void ether_addr_copy(const struct ether_addr *ea_from, + struct ether_addr *ea_to) +{ +#ifdef __INTEL_COMPILER + uint16_t *from_words = (uint16_t *)(ea_from->addr_bytes); + uint16_t *to_words = (uint16_t *)(ea_to->addr_bytes); + + to_words[0] = from_words[0]; + to_words[1] = from_words[1]; + to_words[2] = from_words[2]; +#else + /* + * Use the common way, because of a strange gcc warning. + */ + *ea_to = *ea_from; +#endif +} + +/** + * Ethernet header: Contains the destination address, source address + * and frame type. + */ +struct ether_hdr { + struct ether_addr d_addr; /**< Destination address. */ + struct ether_addr s_addr; /**< Source address. */ + uint16_t ether_type; /**< Frame type. */ +} __attribute__((__packed__)); + +/** + * Ethernet VLAN Header. + * Contains the 16-bit VLAN Tag Control Identifier and the Ethernet type + * of the encapsulated frame. + */ +struct vlan_hdr { + uint16_t vlan_tci; /**< Priority (3) + CFI (1) + Identifier Code (12) */ + uint16_t eth_proto;/**< Ethernet type of encapsulated frame. */ +} __attribute__((__packed__)); + +/* Ethernet frame types */ +#define ETHER_TYPE_IPv4 0x0800 /**< IPv4 Protocol. */ +#define ETHER_TYPE_IPv6 0x86DD /**< IPv6 Protocol. */ +#define ETHER_TYPE_ARP 0x0806 /**< Arp Protocol. */ +#define ETHER_TYPE_RARP 0x8035 /**< Reverse Arp Protocol. */ +#define ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */ +#define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */ + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_ETHER_H_ */ diff --git a/lib/librte_hash/Makefile b/lib/librte_hash/Makefile new file mode 100644 index 0000000000..103ed799f5 --- /dev/null +++ b/lib/librte_hash/Makefile @@ -0,0 +1,55 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
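Using the header definitions above, an illustrative sketch that identifies the payload of a received frame. The EtherType field is carried in network byte order on the wire, so the constants are converted with rte_cpu_to_be_16(), assumed to be available from rte_byteorder.h:

    static int
    frame_is_ipv4(const void *frame)
    {
        const struct ether_hdr *eh = (const struct ether_hdr *)frame;
        uint16_t proto = eh->ether_type;

        /* skip a single 802.1Q tag if present */
        if (proto == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
            const struct vlan_hdr *vh = (const struct vlan_hdr *)(eh + 1);
            proto = vh->eth_proto;
        }
        return proto == rte_cpu_to_be_16(ETHER_TYPE_IPv4);
    }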
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_hash.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_HASH) := rte_hash.c +SRCS-$(CONFIG_RTE_LIBRTE_HASH) += rte_fbk_hash.c + +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include := rte_hash.h +SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_hash_crc.h +SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_jhash.h +SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_fbk_hash.h + +# this lib needs eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_HASH) += lib/librte_eal lib/librte_malloc + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_hash/rte_fbk_hash.c b/lib/librte_hash/rte_fbk_hash.c new file mode 100644 index 0000000000..0b1bd596c7 --- /dev/null +++ b/lib/librte_hash/rte_fbk_hash.c @@ -0,0 +1,210 @@ +/** + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_fbk_hash.h" +#include "rte_jhash.h" +#include "rte_hash_crc.h" + +TAILQ_HEAD(rte_fbk_hash_list, rte_fbk_hash_table); + +/* global list of fbk_hashes (used for debug/dump) */ +static struct rte_fbk_hash_list *fbk_hash_list = NULL; + +/* macro to prevent duplication of list creation check code */ +#define CHECK_FBK_HASH_LIST_CREATED() do { \ + if (fbk_hash_list == NULL) \ + if ((fbk_hash_list = RTE_TAILQ_RESERVE("RTE_FBK_HASH", \ + rte_fbk_hash_list)) == NULL){ \ + rte_errno = E_RTE_NO_TAILQ; \ + return NULL; \ + } \ +} while (0) + + +/** + * Performs a lookup for an existing hash table, and returns a pointer to + * the table if found. + * + * @param name + * Name of the hash table to find + * + * @return + * pointer to hash table structure or NULL on error. + */ +struct rte_fbk_hash_table * +rte_fbk_hash_find_existing(const char *name) +{ + struct rte_fbk_hash_table *h; + + /* check that we have an initialised tail queue */ + CHECK_FBK_HASH_LIST_CREATED(); + + TAILQ_FOREACH(h, fbk_hash_list, next) { + if (strncmp(name, h->name, RTE_FBK_HASH_NAMESIZE) == 0) + break; + } + if (h == NULL) + rte_errno = ENOENT; + return h; +} + +/** + * Create a new hash table for use with four byte keys. + * + * @param params + * Parameters used in creation of hash table. + * + * @return + * Pointer to hash table structure that is used in future hash table + * operations, or NULL on error. + */ +struct rte_fbk_hash_table * +rte_fbk_hash_create(const struct rte_fbk_hash_params *params) +{ + struct rte_fbk_hash_table *ht; + char hash_name[RTE_FBK_HASH_NAMESIZE]; + const uint32_t mem_size = + sizeof(*ht) + (sizeof(ht->t[0]) * params->entries); + uint32_t i; + + /* check that we have access to create things in shared memory. */ + if (rte_eal_process_type() == RTE_PROC_SECONDARY){ + rte_errno = E_RTE_SECONDARY; + return NULL; + } + + /* check that we have an initialised tail queue */ + CHECK_FBK_HASH_LIST_CREATED(); + + /* Error checking of parameters. */ + if ((!rte_is_power_of_2(params->entries)) || + (!rte_is_power_of_2(params->entries_per_bucket)) || + (params->entries == 0) || + (params->entries_per_bucket == 0) || + (params->entries_per_bucket > params->entries) || + (params->entries > RTE_FBK_HASH_ENTRIES_MAX) || + (params->entries_per_bucket > RTE_FBK_HASH_ENTRIES_MAX)){ + rte_errno = EINVAL; + return NULL; + } + + rte_snprintf(hash_name, sizeof(hash_name), "FBK_%s", params->name); + + /* Allocate memory for table. */ +#if defined(RTE_LIBRTE_HASH_USE_MEMZONE) + const struct rte_memzone *mz; + mz = rte_memzone_reserve(hash_name, mem_size, params->socket_id, 0); + if (mz == NULL) + return NULL; + ht = (struct rte_fbk_hash_table *)mz->addr; +#else + ht = (struct rte_fbk_hash_table *)rte_malloc(hash_name, mem_size, 0); + if (ht == NULL) + return NULL; +#endif + memset(ht, 0, mem_size); + + /* Set up hash table context. 
*/ + rte_snprintf(ht->name, sizeof(ht->name), "%s", params->name); + ht->entries = params->entries; + ht->entries_per_bucket = params->entries_per_bucket; + ht->used_entries = 0; + ht->bucket_mask = (params->entries / params->entries_per_bucket) - 1; + for (ht->bucket_shift = 0, i = 1; + (params->entries_per_bucket & i) == 0; + ht->bucket_shift++, i <<= 1) + ; /* empty loop body */ + + if (params->hash_func != NULL) { + ht->hash_func = params->hash_func; + ht->init_val = params->init_val; + } + else { + ht->hash_func = RTE_FBK_HASH_FUNC_DEFAULT; + ht->init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT; + } + + if (ht->hash_func == rte_hash_crc_4byte && + !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2)) { + RTE_LOG(WARNING, HASH, "CRC32 instruction requires SSE4.2, " + "which is not supported on this system. " + "Falling back to software hash\n."); + ht->hash_func = rte_jhash_1word; + } + + TAILQ_INSERT_TAIL(fbk_hash_list, ht, next); + return ht; +} + +/** + * Free all memory used by a hash table. + * + * @param ht + * Hash table to deallocate. + */ +void +rte_fbk_hash_free(struct rte_fbk_hash_table *ht) +{ + if (ht == NULL) + return; + /* No way to deallocate memzones - but can de-allocate from malloc */ +#if !defined(RTE_LIBRTE_HASH_USE_MEMZONE) + TAILQ_REMOVE(fbk_hash_list, ht, next); + rte_free(ht); +#endif + RTE_SET_USED(ht); + return; +} + diff --git a/lib/librte_hash/rte_fbk_hash.h b/lib/librte_hash/rte_fbk_hash.h new file mode 100644 index 0000000000..2d16046b66 --- /dev/null +++ b/lib/librte_hash/rte_fbk_hash.h @@ -0,0 +1,334 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_FBK_HASH_H_ +#define _RTE_FBK_HASH_H_ + +/** + * @file + * + * This is a hash table implementation for four byte keys (fbk). + * + * Note that the return value of the add function should always be checked as, + * if a bucket is full, the key is not added even if there is space in other + * buckets. 
This keeps the lookup function very simple and therefore fast. + */ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#ifndef RTE_FBK_HASH_FUNC_DEFAULT +/** Default four-byte key hash function if none is specified. */ +#define RTE_FBK_HASH_FUNC_DEFAULT rte_hash_crc_4byte +#endif + +#ifndef RTE_FBK_HASH_INIT_VAL_DEFAULT +/** Initialising value used when calculating hash. */ +#define RTE_FBK_HASH_INIT_VAL_DEFAULT 0xFFFFFFFF +#endif + +/** The maximum number of entries in the hash table that is supported. */ +#define RTE_FBK_HASH_ENTRIES_MAX (1 << 20) + +/** The maximum number of entries in each bucket that is supported. */ +#define RTE_FBK_HASH_ENTRIES_PER_BUCKET_MAX 256 + +/** Maximum size of string for naming the hash. */ +#define RTE_FBK_HASH_NAMESIZE 32 + +/** Type of function that can be used for calculating the hash value. */ +typedef uint32_t (*rte_fbk_hash_fn)(uint32_t key, uint32_t init_val); + +/** Parameters used when creating four-byte key hash table. */ +struct rte_fbk_hash_params { + const char *name; /**< Name of the hash table. */ + uint32_t entries; /**< Total number of entries. */ + uint32_t entries_per_bucket; /**< Number of entries in a bucket. */ + int socket_id; /**< Socket to allocate memory on. */ + rte_fbk_hash_fn hash_func; /**< The hash function. */ + uint32_t init_val; /**< For initialising hash function. */ +}; + +/** Individual entry in the four-byte key hash table. */ +union rte_fbk_hash_entry { + uint64_t whole_entry; /**< For accessing entire entry. */ + struct { + uint16_t is_entry; /**< Non-zero if entry is active. */ + uint16_t value; /**< Value returned by lookup. */ + uint32_t key; /**< Key used to find value. */ + } entry; /**< For accessing each entry part. */ +} ; + + + +/** The four-byte key hash table structure. */ +struct rte_fbk_hash_table { + TAILQ_ENTRY(rte_fbk_hash_table) next; /**< Linked list. */ + + char name[RTE_FBK_HASH_NAMESIZE]; /**< Name of the hash. */ + uint32_t entries; /**< Total number of entries. */ + uint32_t entries_per_bucket; /**< Number of entries in a bucket. */ + uint32_t used_entries; /**< How many entries are used. */ + uint32_t bucket_mask; /**< To find which bucket the key is in. */ + uint32_t bucket_shift; /**< Convert bucket to table offset. */ + rte_fbk_hash_fn hash_func; /**< The hash function. */ + uint32_t init_val; /**< For initialising hash function. */ + + /** A flat table of all buckets. */ + union rte_fbk_hash_entry t[0]; +}; + +/** + * Find the offset into hash table of the bucket containing a particular key. + * + * @param ht + * Pointer to hash table. + * @param key + * Key to calculate bucket for. + * @return + * Offset into hash table. + */ +static inline uint32_t +rte_fbk_hash_get_bucket(const struct rte_fbk_hash_table *ht, uint32_t key) +{ + return (ht->hash_func(key, ht->init_val) & ht->bucket_mask) << + ht->bucket_shift; +} + + +/** + * Add a key to an existing hash table. This operation is not multi-thread safe + * and should only be called from one thread. + * + * @param ht + * Hash table to add the key to. + * @param key + * Key to add to the hash table. + * @param value + * Value to associate with key. + * @return + * 0 if ok, or negative value on error. + */ +static inline int +rte_fbk_hash_add_key(struct rte_fbk_hash_table *ht, + uint32_t key, uint16_t value) +{ + /* + * The writing of a new value to the hash table is done as a single + * 64bit operation. 
This should help prevent individual entries being + * corrupted due to race conditions, but it's still possible to + * overwrite entries that have just been made valid. + */ + const uint64_t new_entry = ((uint64_t)(key) << 32) | + ((uint64_t)(value) << 16) | + 1; /* 1 = is_entry bit. */ + const uint32_t bucket = rte_fbk_hash_get_bucket(ht, key); + uint32_t i; + + for (i = 0; i < ht->entries_per_bucket; i++) { + /* Set entry if unused. */ + if (! ht->t[bucket + i].entry.is_entry) { + ht->t[bucket + i].whole_entry = new_entry; + ht->used_entries++; + return 0; + } + /* Change value if key already exists. */ + if (ht->t[bucket + i].entry.key == key) { + ht->t[bucket + i].entry.value = value; + return 0; + } + } + + return -ENOSPC; /* No space in bucket. */ +} + +/** + * Remove a key from an existing hash table. This operation is not multi-thread + * safe and should only be called from one thread. + * + * @param ht + * Hash table to remove the key from. + * @param key + * Key to remove from the hash table. + * @return + * 0 if ok, or negative value on error. + */ +static inline int +rte_fbk_hash_delete_key(struct rte_fbk_hash_table *ht, uint32_t key) +{ + const uint32_t bucket = rte_fbk_hash_get_bucket(ht, key); + uint32_t last_entry = ht->entries_per_bucket - 1; + uint32_t i, j; + + for (i = 0; i < ht->entries_per_bucket; i++) { + if (ht->t[bucket + i].entry.key == key) { + /* Find last key in bucket. */ + for (j = ht->entries_per_bucket - 1; j > i; j-- ) { + if (! ht->t[bucket + j].entry.is_entry) { + last_entry = j - 1; + } + } + /* + * Move the last key to the deleted key's position, and + * delete the last key. lastEntry and i may be same but + * it doesn't matter. + */ + ht->t[bucket + i].whole_entry = + ht->t[bucket + last_entry].whole_entry; + ht->t[bucket + last_entry].whole_entry = 0; + + ht->used_entries--; + return 0; + } + } + + return -ENOENT; /* Key didn't exist. */ +} + +/** + * Find a key in the hash table. This operation is multi-thread safe. + * + * @param ht + * Hash table to look in. + * @param key + * Key to find. + * @return + * The value that was associated with the key, or negative value on error. + */ +static inline int +rte_fbk_hash_lookup(const struct rte_fbk_hash_table *ht, uint32_t key) +{ + const uint32_t bucket = rte_fbk_hash_get_bucket(ht, key); + union rte_fbk_hash_entry current_entry; + uint32_t i; + + for (i = 0; i < ht->entries_per_bucket; i++) { + /* Single read of entry, which should be atomic. */ + current_entry.whole_entry = ht->t[bucket + i].whole_entry; + if (! current_entry.entry.is_entry) { + return -ENOENT; /* Error once we hit an empty field. */ + } + if (current_entry.entry.key == key) { + return current_entry.entry.value; + } + } + return -ENOENT; /* Key didn't exist. */ +} + +/** + * Delete all entries in a hash table. This operation is not multi-thread + * safe and should only be called from one thread. + * + * @param ht + * Hash table to delete entries in. + */ +static inline void +rte_fbk_hash_clear_all(struct rte_fbk_hash_table *ht) +{ + memset(ht->t, 0, sizeof(ht->t[0]) * ht->entries); + ht->used_entries = 0; +} + +/** + * Find what fraction of entries are being used. + * + * @param ht + * Hash table to find how many entries are being used in. + * @return + * Load factor of the hash table, or negative value on error. 
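An illustrative sketch of creating and using a four-byte-key table with the functions above. The return value of rte_fbk_hash_add_key() is checked, as the note at the top of this file recommends, because a full bucket rejects the key even when other buckets still have room; <stdio.h> is assumed:

    #include <stdio.h>

    static void
    fbk_example(void)
    {
        struct rte_fbk_hash_params params = {
            .name = "example_fbk",
            .entries = 1024,
            .entries_per_bucket = 4,
            .socket_id = 0,
            .hash_func = NULL,   /* NULL selects RTE_FBK_HASH_FUNC_DEFAULT */
            .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
        };
        struct rte_fbk_hash_table *ht = rte_fbk_hash_create(&params);
        uint32_t key = 0x0a000001;   /* e.g. an IPv4 address, 10.0.0.1 */

        if (ht == NULL)
            return;
        if (rte_fbk_hash_add_key(ht, key, 7) < 0)
            return;   /* bucket full: the key was NOT stored */
        printf("lookup -> %d\n", rte_fbk_hash_lookup(ht, key));   /* prints 7 */
        rte_fbk_hash_free(ht);
    }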
+ */ +static inline double +rte_fbk_hash_get_load_factor(struct rte_fbk_hash_table *ht) +{ + return (double)ht->used_entries / (double)ht->entries; +} + +/** + * Performs a lookup for an existing hash table, and returns a pointer to + * the table if found. + * + * @param name + * Name of the hash table to find + * + * @return + * pointer to hash table structure or NULL on error with rte_errno + * set appropriately. Possible rte_errno values include: + * - ENOENT - required entry not available to return. + */ +struct rte_fbk_hash_table *rte_fbk_hash_find_existing(const char *name); + +/** + * Create a new hash table for use with four byte keys. + * + * @param params + * Parameters used in creation of hash table. + * + * @return + * Pointer to hash table structure that is used in future hash table + * operations, or NULL on error with rte_errno set appropriately. + * Possible rte_errno error values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - E_RTE_NO_TAILQ - no tailq list could be got for the fbk hash table list + * - EINVAL - invalid parameter value passed to function + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + */ +struct rte_fbk_hash_table * \ +rte_fbk_hash_create(const struct rte_fbk_hash_params *params); + +/** + * Free all memory used by a hash table. + * Has no effect on hash tables allocated in memory zones + * + * @param ht + * Hash table to deallocate. + */ +void rte_fbk_hash_free(struct rte_fbk_hash_table *ht); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_FBK_HASH_H_ */ diff --git a/lib/librte_hash/rte_hash.c b/lib/librte_hash/rte_hash.c new file mode 100644 index 0000000000..76cba41501 --- /dev/null +++ b/lib/librte_hash/rte_hash.c @@ -0,0 +1,407 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include /* for definition of CACHE_LINE_SIZE */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_hash.h" +#include "rte_jhash.h" +#include "rte_hash_crc.h" + + +TAILQ_HEAD(rte_hash_list, rte_hash); + +/* global list of hashes (used for debug/dump) */ +static struct rte_hash_list *hash_list; + +/* macro to prevent duplication of list creation check code */ +#define CHECK_HASH_LIST_CREATED() do { \ + if (hash_list == NULL) \ + if ((hash_list = RTE_TAILQ_RESERVE("RTE_HASH", rte_hash_list)) == NULL){ \ + rte_errno = E_RTE_NO_TAILQ; \ + return NULL; \ + } \ +} while (0) + +/* Macro to enable/disable run-time checking of function parameters */ +#if defined(RTE_LIBRTE_HASH_DEBUG) +#define RETURN_IF_TRUE(cond, retval) do { \ + if (cond) return (retval); \ +} while (0) +#else +#define RETURN_IF_TRUE(cond, retval) +#endif + +/* Hash function used if none is specified */ +#define DEFAULT_HASH_FUNC rte_hash_crc + +/* Signature bucket size is a multiple of this value */ +#define SIG_BUCKET_ALIGNMENT 16 + +/* Stoered key size is a multiple of this value */ +#define KEY_ALIGNMENT 16 + +/* The high bit is always set in real signatures */ +#define NULL_SIGNATURE 0 + +/* Returns a pointer to the first signature in specified bucket. */ +static inline hash_sig_t * +get_sig_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index) +{ + return (hash_sig_t *) + &(h->sig_tbl[bucket_index * h->sig_tbl_bucket_size]); +} + +/* Returns a pointer to the first key in specified bucket. */ +static inline uint8_t * +get_key_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index) +{ + return (uint8_t *) &(h->key_tbl[bucket_index * h->bucket_entries * + h->key_tbl_key_size]); +} + +/* Returns a pointer to a key at a specific position in a specified bucket. */ +static inline void * +get_key_from_bucket(const struct rte_hash *h, uint8_t *bkt, uint32_t pos) +{ + return (void *) &bkt[pos * h->key_tbl_key_size]; +} + +/* Does integer division with rounding-up of result. */ +static inline uint32_t +div_roundup(uint32_t numerator, uint32_t denominator) +{ + return (numerator + denominator - 1) / denominator; +} + +/* Increases a size (if needed) to a multiple of alignment. */ +static inline uint32_t +align_size(uint32_t val, uint32_t alignment) +{ + return alignment * div_roundup(val, alignment); +} + +/* Returns the index into the bucket of the first occurrence of a signature. 
*/ +static inline int +find_first(uint32_t sig, const uint32_t *sig_bucket, uint32_t num_sigs) +{ + uint32_t i; + for (i = 0; i < num_sigs; i++) { + if (sig == sig_bucket[i]) + return i; + } + return -1; +} + +struct rte_hash * +rte_hash_find_existing(const char *name) +{ + struct rte_hash *h; + + /* check that we have an initialised tail queue */ + CHECK_HASH_LIST_CREATED(); + + TAILQ_FOREACH(h, hash_list, next) { + if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0) + break; + } + if (h == NULL) + rte_errno = ENOENT; + return h; +} + +struct rte_hash * +rte_hash_create(const struct rte_hash_parameters *params) +{ + struct rte_hash *h = NULL; + uint32_t num_buckets, sig_bucket_size, key_size, + hash_tbl_size, sig_tbl_size, key_tbl_size, mem_size; + char hash_name[RTE_HASH_NAMESIZE]; + + if (rte_eal_process_type() == RTE_PROC_SECONDARY){ + rte_errno = E_RTE_SECONDARY; + return NULL; + } + + /* check that we have an initialised tail queue */ + CHECK_HASH_LIST_CREATED(); + + /* Check for valid parameters */ + if ((params == NULL) || + (params->entries > RTE_HASH_ENTRIES_MAX) || + (params->bucket_entries > RTE_HASH_BUCKET_ENTRIES_MAX) || + (params->entries < params->bucket_entries) || + !rte_is_power_of_2(params->entries) || + !rte_is_power_of_2(params->bucket_entries) || + (params->key_len == 0) || + (params->key_len > RTE_HASH_KEY_LENGTH_MAX)) { + rte_errno = EINVAL; + RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n"); + return NULL; + } + + rte_snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name); + + /* Calculate hash dimensions */ + num_buckets = params->entries / params->bucket_entries; + sig_bucket_size = align_size(params->bucket_entries * + sizeof(hash_sig_t), SIG_BUCKET_ALIGNMENT); + key_size = align_size(params->key_len, KEY_ALIGNMENT); + + hash_tbl_size = align_size(sizeof(struct rte_hash), CACHE_LINE_SIZE); + sig_tbl_size = align_size(num_buckets * sig_bucket_size, + CACHE_LINE_SIZE); + key_tbl_size = align_size(num_buckets * key_size * + params->bucket_entries, CACHE_LINE_SIZE); + + /* Total memory required for hash context */ + mem_size = hash_tbl_size + sig_tbl_size + key_tbl_size; + + /* Allocate as a memzone, or in normal memory space */ +#if defined(RTE_LIBRTE_HASH_USE_MEMZONE) + const struct rte_memzone *mz; + mz = rte_memzone_reserve(hash_name, mem_size, params->socket_id, 0); + if (mz == NULL) { + RTE_LOG(ERR, HASH, "memzone reservation failed\n"); + return NULL; + } + memset(mz->addr, 0, mem_size); + h = (struct rte_hash *)mz->addr; +#else + h = (struct rte_hash *)rte_zmalloc(hash_name, mem_size, + CACHE_LINE_SIZE); + if (h == NULL) { + RTE_LOG(ERR, HASH, "memory allocation failed\n"); + return NULL; + } +#endif + + /* Setup hash context */ + rte_snprintf(h->name, sizeof(h->name), "%s", params->name); + h->entries = params->entries; + h->bucket_entries = params->bucket_entries; + h->key_len = params->key_len; + h->hash_func_init_val = params->hash_func_init_val; + h->num_buckets = num_buckets; + h->bucket_bitmask = h->num_buckets - 1; + h->sig_msb = 1 << (sizeof(hash_sig_t) * 8 - 1); + h->sig_tbl = (uint8_t *)h + hash_tbl_size; + h->sig_tbl_bucket_size = sig_bucket_size; + h->key_tbl = h->sig_tbl + sig_tbl_size; + h->key_tbl_key_size = key_size; + h->hash_func = (params->hash_func == NULL) ? + DEFAULT_HASH_FUNC : params->hash_func; + + if (h->hash_func == rte_hash_crc && + !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2)) { + RTE_LOG(WARNING, HASH, "CRC32 instruction requires SSE4.2, " + "which is not supported on this system. 
" + "Falling back to software hash\n."); + h->hash_func = rte_jhash; + } + + TAILQ_INSERT_TAIL(hash_list, h, next); + return h; +} + +void +rte_hash_free(struct rte_hash *h) +{ + if (h == NULL) + return; +#if !defined(RTE_LIBRTE_HASH_USE_MEMZONE) + TAILQ_REMOVE(hash_list, h, next); + rte_free(h); +#endif + /* No way to deallocate memzones */ + return; +} + +int32_t +rte_hash_add_key(const struct rte_hash *h, const void *key) +{ + hash_sig_t sig, *sig_bucket; + uint8_t *key_bucket; + uint32_t bucket_index, i; + int32_t pos; + + RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL); + + /* Get the hash signature and bucket index */ + sig = h->hash_func(key, h->key_len, h->hash_func_init_val) | h->sig_msb; + bucket_index = sig & h->bucket_bitmask; + sig_bucket = get_sig_tbl_bucket(h, bucket_index); + key_bucket = get_key_tbl_bucket(h, bucket_index); + + /* Check if key is already present in the hash */ + for (i = 0; i < h->bucket_entries; i++) { + if ((sig == sig_bucket[i]) && + likely(memcmp(key, get_key_from_bucket(h, key_bucket, i), + h->key_len) == 0)) { + return bucket_index * h->bucket_entries + i; + } + } + + /* Check if any free slot within the bucket to add the new key */ + pos = find_first(NULL_SIGNATURE, sig_bucket, h->bucket_entries); + + if (unlikely(pos < 0)) + return -ENOSPC; + + /* Add the new key to the bucket */ + sig_bucket[pos] = sig; + rte_memcpy(get_key_from_bucket(h, key_bucket, pos), key, h->key_len); + return bucket_index * h->bucket_entries + pos; +} + +int32_t +rte_hash_del_key(const struct rte_hash *h, const void *key) +{ + hash_sig_t sig, *sig_bucket; + uint8_t *key_bucket; + uint32_t bucket_index, i; + + RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL); + + /* Get the hash signature and bucket index */ + sig = h->hash_func(key, h->key_len, h->hash_func_init_val) | h->sig_msb; + bucket_index = sig & h->bucket_bitmask; + sig_bucket = get_sig_tbl_bucket(h, bucket_index); + key_bucket = get_key_tbl_bucket(h, bucket_index); + + /* Check if key is already present in the hash */ + for (i = 0; i < h->bucket_entries; i++) { + if ((sig == sig_bucket[i]) && + likely(memcmp(key, get_key_from_bucket(h, key_bucket, i), + h->key_len) == 0)) { + sig_bucket[i] = NULL_SIGNATURE; + return bucket_index * h->bucket_entries + i; + } + } + + return -ENOENT; +} + +int32_t +rte_hash_lookup(const struct rte_hash *h, const void *key) +{ + hash_sig_t sig, *sig_bucket; + uint8_t *key_bucket; + uint32_t bucket_index, i; + + RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL); + + /* Get the hash signature and bucket index */ + sig = h->hash_func(key, h->key_len, h->hash_func_init_val) | h->sig_msb; + bucket_index = sig & h->bucket_bitmask; + sig_bucket = get_sig_tbl_bucket(h, bucket_index); + key_bucket = get_key_tbl_bucket(h, bucket_index); + + /* Check if key is already present in the hash */ + for (i = 0; i < h->bucket_entries; i++) { + if ((sig == sig_bucket[i]) && + likely(memcmp(key, get_key_from_bucket(h, key_bucket, i), + h->key_len) == 0)) { + return bucket_index * h->bucket_entries + i; + } + } + + return -ENOENT; +} + +int +rte_hash_lookup_multi(const struct rte_hash *h, const void **keys, + uint32_t num_keys, int32_t *positions) +{ + uint32_t i, j, bucket_index; + hash_sig_t sigs[RTE_HASH_LOOKUP_MULTI_MAX]; + + RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) || + (num_keys > RTE_HASH_LOOKUP_MULTI_MAX) || + (positions == NULL)), -EINVAL); + + /* Get the hash signature and bucket index */ + for (i = 0; i < num_keys; i++) { + sigs[i] = 
h->hash_func(keys[i], h->key_len, + h->hash_func_init_val) | h->sig_msb; + bucket_index = sigs[i] & h->bucket_bitmask; + + /* Pre-fetch relevant buckets */ + rte_prefetch1((void *) get_sig_tbl_bucket(h, bucket_index)); + rte_prefetch1((void *) get_key_tbl_bucket(h, bucket_index)); + } + + /* Check if key is already present in the hash */ + for (i = 0; i < num_keys; i++) { + bucket_index = sigs[i] & h->bucket_bitmask; + hash_sig_t *sig_bucket = get_sig_tbl_bucket(h, bucket_index); + uint8_t *key_bucket = get_key_tbl_bucket(h, bucket_index); + + positions[i] = -ENOENT; + + for (j = 0; j < h->bucket_entries; j++) { + if ((sigs[i] == sig_bucket[j]) && + likely(memcmp(keys[i], + get_key_from_bucket(h, key_bucket, j), + h->key_len) == 0)) { + positions[i] = bucket_index * + h->bucket_entries + j; + break; + } + } + } + + return 0; +} diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h new file mode 100644 index 0000000000..eb55deb5de --- /dev/null +++ b/lib/librte_hash/rte_hash.h @@ -0,0 +1,236 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_HASH_H_ +#define _RTE_HASH_H_ + +/** + * @file + * + * RTE Hash Table + */ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** Maximum size of hash table that can be created. */ +#define RTE_HASH_ENTRIES_MAX (1 << 26) + +/** Maximum bucket size that can be created. */ +#define RTE_HASH_BUCKET_ENTRIES_MAX 16 + +/** Maximum length of key that can be used. */ +#define RTE_HASH_KEY_LENGTH_MAX 64 + +/** Max number of keys that can be searched for using rte_hash_lookup_multi. */ +#define RTE_HASH_LOOKUP_MULTI_MAX 16 + +/** Max number of characters in hash name.*/ +#define RTE_HASH_NAMESIZE 32 + +/** Signature of key that is stored internally. */ +typedef uint32_t hash_sig_t; + +/** Type of function that can be used for calculating the hash value. 
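+ *
+ * As a purely illustrative sketch (my_hash is a made-up name, not part of
+ * this library), a user-supplied function matching this prototype might
+ * simply fold the first bytes of the key into a single word:
+ *
+ *   static uint32_t
+ *   my_hash(const void *key, uint32_t key_len, uint32_t init_val)
+ *   {
+ *       uint32_t v = 0;
+ *       memcpy(&v, key, key_len < sizeof(v) ? key_len : sizeof(v));
+ *       return v ^ init_val;
+ *   }
+ *
+ * If rte_hash_parameters.hash_func is left NULL, rte_hash_create() falls
+ * back to the library's default hash function instead.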
*/ +typedef uint32_t (*rte_hash_function)(const void *key, uint32_t key_len, + uint32_t init_val); + +/** + * Parameters used when creating the hash table. The total table entries and + * bucket entries must be a power of 2. + */ +struct rte_hash_parameters { + const char *name; /**< Name of the hash. */ + uint32_t entries; /**< Total hash table entries. */ + uint32_t bucket_entries; /**< Bucket entries. */ + uint32_t key_len; /**< Length of hash key. */ + rte_hash_function hash_func; /**< Function used to calculate hash. */ + uint32_t hash_func_init_val; /**< Init value used by hash_func. */ + int socket_id; /**< NUMA Socket ID for memory. */ +}; + +/** A hash table structure. */ +struct rte_hash { + TAILQ_ENTRY(rte_hash) next;/**< Next in list. */ + + char name[RTE_HASH_NAMESIZE]; /**< Name of the hash. */ + uint32_t entries; /**< Total table entries. */ + uint32_t bucket_entries; /**< Bucket entries. */ + uint32_t key_len; /**< Length of hash key. */ + rte_hash_function hash_func; /**< Function used to calculate hash. */ + uint32_t hash_func_init_val; /**< Init value used by hash_func. */ + uint32_t num_buckets; /**< Number of buckets in table. */ + uint32_t bucket_bitmask; /**< Bitmask for getting bucket index + from hash signature. */ + hash_sig_t sig_msb; /**< MSB is always set in valid signatures. */ + uint8_t *sig_tbl; /**< Flat array of hash signature buckets. */ + uint32_t sig_tbl_bucket_size; /**< Signature buckets may be padded for + alignment reasons, and this is the + bucket size used by sig_tbl. */ + uint8_t *key_tbl; /**< Flat array of key value buckets. */ + uint32_t key_tbl_key_size; /**< Keys may be padded for alignment + reasons, and this is the key size + used by key_tbl. */ +}; + +/** + * Create a new hash table. If RTE_LIBRTE_HASH_USE_MEMZONE is defined, then + * the hash table is allocated in a memzone on a specific NUMA socket ID, + * otherwise it is allocated in the heap. + * + * @param params + * Parameters used to create and initialise the hash table. + * @return + * Pointer to hash table structure that is used in future hash table + * operations, or NULL on error, with error code set in rte_errno. + * Possible rte_errno errors include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - E_RTE_NO_TAILQ - no tailq list could be got for the hash table list + * - ENOENT - missing entry + * - EINVAL - invalid parameter passed to function + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + */ +struct rte_hash * +rte_hash_create(const struct rte_hash_parameters *params); + + +/** + * Find an existing hash table object and return a pointer to it. + * + * @param name + * Name of the hash table as passed to rte_hash_create() + * @return + * Pointer to hash table or NULL if object not found + * with rte_errno set appropriately. Possible rte_errno values include: + * - ENOENT - value not available for return + */ +struct rte_hash * +rte_hash_find_existing(const char *name); + +/** + * De-allocate all memory used by hash table. If RTE_LIBRTE_HASH_USE_MEMZONE + * is defined, then this has no effect. + * @param h + * Hash table to free + */ +void +rte_hash_free(struct rte_hash *h); + +/** + * Add a key to an existing hash table. 
This operation is not multi-thread safe + * and should only be called from one thread. + * + * @param h + * Hash table to add the key to. + * @param key + * Key to add to the hash table. + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOSPC if there is no space in the hash for this key. + * - A positive value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key. + */ +int32_t +rte_hash_add_key(const struct rte_hash *h, const void *key); + +/** + * Remove a key from an existing hash table. This operation is not multi-thread + * safe and should only be called from one thread. + * + * @param h + * Hash table to remove the key from. + * @param key + * Key to remove from the hash table. + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. + * - A positive value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + */ +int32_t +rte_hash_del_key(const struct rte_hash *h, const void *key); + +/** + * Find a key in the hash table. This operation is multi-thread safe. + * + * @param h + * Hash table to look in. + * @param key + * Key to find. + * @return + * - -EINVAL if the parameters are invalid. + * - -ENOENT if the key is not found. + * - A positive value that can be used by the caller as an offset into an + * array of user data. This value is unique for this key, and is the same + * value that was returned when the key was added. + */ +int32_t +rte_hash_lookup(const struct rte_hash *h, const void *key); + +/** + * Find multiple keys in the hash table. This operation is multi-thread safe. + * + * @param h + * Hash table to look in. + * @param keys + * A pointer to a list of keys to look for. + * @param num_keys + * How many keys are in the keys list (less than RTE_HASH_LOOKUP_MULTI_MAX). + * @param positions + * Output containing a list of values, corresponding to the list of keys that + * can be used by the caller as an offset into an array of user data. These + * values are unique for each key, and are the same values that were returned + * when each key was added. If a key in the list was not found, then -ENOENT + * will be the value. + * @return + * -EINVAL if there's an error, otherwise 0. + */ +int +rte_hash_lookup_multi(const struct rte_hash *h, const void **keys, + uint32_t num_keys, int32_t *positions); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_HASH_H_ */ diff --git a/lib/librte_hash/rte_hash_crc.h b/lib/librte_hash/rte_hash_crc.h new file mode 100644 index 0000000000..c5cee9c9e9 --- /dev/null +++ b/lib/librte_hash/rte_hash_crc.h @@ -0,0 +1,114 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_HASH_CRC_H_ +#define _RTE_HASH_CRC_H_ + +/** + * @file + * + * RTE CRC Hash + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * Use single crc32 instruction to perform a hash on a 4 byte value. + * + * @param data + * Data to perform hash on. + * @param init_val + * Value to initialise hash generator. + * @return + * 32bit calculated hash value. + */ +static inline uint32_t +rte_hash_crc_4byte(uint32_t data, uint32_t init_val) +{ + asm volatile("crc32 %[data], %[init_val]" + : [init_val]"=r" (init_val) + : [data]"r" (data), "[init_val]" (init_val)); + return init_val; +} + +/** + * Use crc32 instruction to perform a hash. + * + * @param data + * Data to perform hash on. + * @param data_len + * How many bytes to use to calculate hash value. + * @param init_val + * Value to initialise hash generator. + * @return + * 32bit calculated hash value. + */ +static inline uint32_t +rte_hash_crc(const void *data, uint32_t data_len, uint32_t init_val) +{ + unsigned i; + uint32_t temp = 0; + const uint32_t *p32 = (const uint32_t *)data; + + for (i = 0; i < data_len / 4; i++) { + init_val = rte_hash_crc_4byte(*p32++, init_val); + } + + switch (3 - (data_len & 0x03)) { + case 0: + temp |= *((const uint8_t *)p32 + 2) << 16; + /* Fallthrough */ + case 1: + temp |= *((const uint8_t *)p32 + 1) << 8; + /* Fallthrough */ + case 2: + temp |= *((const uint8_t *)p32); + init_val = rte_hash_crc_4byte(temp, init_val); + default: + break; + } + + return init_val; +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_HASH_CRC_H_ */ diff --git a/lib/librte_hash/rte_jhash.h b/lib/librte_hash/rte_jhash.h new file mode 100644 index 0000000000..12f794c536 --- /dev/null +++ b/lib/librte_hash/rte_jhash.h @@ -0,0 +1,263 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_JHASH_H +#define _RTE_JHASH_H + +/** + * @file + * + * jhash functions. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/* jhash.h: Jenkins hash support. + * + * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net) + * + * http://burtleburtle.net/bob/hash/ + * + * These are the credits from Bob's sources: + * + * lookup2.c, by Bob Jenkins, December 1996, Public Domain. + * hash(), hash2(), hash3, and mix() are externally useful functions. + * Routines to test the hash are included if SELF_TEST is defined. + * You can use this free for any purpose. It has no warranty. + * + * $FreeBSD$ + */ + +/** @internal Internal function. NOTE: Arguments are modified. */ +#define __rte_jhash_mix(a, b, c) do { \ + a -= b; a -= c; a ^= (c>>13); \ + b -= c; b -= a; b ^= (a<<8); \ + c -= a; c -= b; c ^= (b>>13); \ + a -= b; a -= c; a ^= (c>>12); \ + b -= c; b -= a; b ^= (a<<16); \ + c -= a; c -= b; c ^= (b>>5); \ + a -= b; a -= c; a ^= (c>>3); \ + b -= c; b -= a; b ^= (a<<10); \ + c -= a; c -= b; c ^= (b>>15); \ +} while (0) + +/** The golden ratio: an arbitrary value. */ +#define RTE_JHASH_GOLDEN_RATIO 0x9e3779b9 + +/** + * The most generic version, hashes an arbitrary sequence + * of bytes. No alignment or length assumptions are made about + * the input key. + * + * @param key + * Key to calculate hash of. + * @param length + * Length of key in bytes. + * @param initval + * Initialising value of hash. + * @return + * Calculated hash value. 
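+ *
+ * Purely for illustration (mac_addr and the seed value are made up):
+ * hashing a 6-byte Ethernet address could be written as
+ *   hash = rte_jhash(mac_addr, 6, 0xdeadbeef);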
+ */ +static inline uint32_t +rte_jhash(const void *key, uint32_t length, uint32_t initval) +{ + uint32_t a, b, c, len; + const uint8_t *k = (const uint8_t *) key; + + len = length; + a = b = RTE_JHASH_GOLDEN_RATIO; + c = initval; + + while (len >= 12) { + a += (k[0] + ((uint32_t)k[1] << 8) + ((uint32_t)k[2] << 16) + + ((uint32_t)k[3] << 24)); + b += (k[4] + ((uint32_t)k[5] << 8) + ((uint32_t)k[6] << 16) + + ((uint32_t)k[7] << 24)); + c += (k[8] + ((uint32_t)k[9] << 8) + ((uint32_t)k[10] << 16) + + ((uint32_t)k[11] << 24)); + + __rte_jhash_mix(a,b,c); + + k += 12; + len -= 12; + } + + c += length; + switch (len) { + case 11: c += ((uint32_t)k[10] << 24); + case 10: c += ((uint32_t)k[9] << 16); + case 9 : c += ((uint32_t)k[8] << 8); + case 8 : b += ((uint32_t)k[7] << 24); + case 7 : b += ((uint32_t)k[6] << 16); + case 6 : b += ((uint32_t)k[5] << 8); + case 5 : b += k[4]; + case 4 : a += ((uint32_t)k[3] << 24); + case 3 : a += ((uint32_t)k[2] << 16); + case 2 : a += ((uint32_t)k[1] << 8); + case 1 : a += k[0]; + default: break; + }; + + __rte_jhash_mix(a,b,c); + + return c; +} + +/** + * A special optimized version that handles 1 or more of uint32_ts. + * The length parameter here is the number of uint32_ts in the key. + * + * @param k + * Key to calculate hash of. + * @param length + * Length of key in units of 4 bytes. + * @param initval + * Initialising value of hash. + * @return + * Calculated hash value. + */ +static inline uint32_t +rte_jhash2(uint32_t *k, uint32_t length, uint32_t initval) +{ + uint32_t a, b, c, len; + + a = b = RTE_JHASH_GOLDEN_RATIO; + c = initval; + len = length; + + while (len >= 3) { + a += k[0]; + b += k[1]; + c += k[2]; + __rte_jhash_mix(a, b, c); + k += 3; len -= 3; + } + + c += length * 4; + + switch (len) { + case 2 : b += k[1]; + case 1 : a += k[0]; + default: break; + }; + + __rte_jhash_mix(a,b,c); + + return c; +} + + +/** + * A special ultra-optimized versions that knows it is hashing exactly + * 3 words. + * + * @param a + * First word to calcuate hash of. + * @param b + * Second word to calcuate hash of. + * @param c + * Third word to calcuate hash of. + * @param initval + * Initialising value of hash. + * @return + * Calculated hash value. + */ +static inline uint32_t +rte_jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval) +{ + a += RTE_JHASH_GOLDEN_RATIO; + b += RTE_JHASH_GOLDEN_RATIO; + c += initval; + + __rte_jhash_mix(a, b, c); + + /* + * NOTE: In particular the "c += length; __rte_jhash_mix(a,b,c);" + * normally done at the end is not done here. + */ + return c; +} + +/** + * A special ultra-optimized versions that knows it is hashing exactly + * 2 words. + * + * NOTE: In partilar the "c += length; __rte_jhash_mix(a,b,c);" normally + * done at the end is not done here. + * + * @param a + * First word to calcuate hash of. + * @param b + * Second word to calcuate hash of. + * @param initval + * Initialising value of hash. + * @return + * Calculated hash value. + */ +static inline uint32_t +rte_jhash_2words(uint32_t a, uint32_t b, uint32_t initval) +{ + return rte_jhash_3words(a, b, 0, initval); +} + +/** + * A special ultra-optimized versions that knows it is hashing exactly + * 1 word. + * + * NOTE: In partilar the "c += length; __rte_jhash_mix(a,b,c);" normally + * done at the end is not done here. + * + * @param a + * Word to calcuate hash of. + * @param initval + * Initialising value of hash. + * @return + * Calculated hash value. 
+ */ +static inline uint32_t +rte_jhash_1word(uint32_t a, uint32_t initval) +{ + return rte_jhash_3words(a, 0, 0, initval); +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_JHASH_H */ diff --git a/lib/librte_lpm/Makefile b/lib/librte_lpm/Makefile new file mode 100644 index 0000000000..1cb8d2793a --- /dev/null +++ b/lib/librte_lpm/Makefile @@ -0,0 +1,51 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_lpm.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_LPM) := rte_lpm.c + +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_LPM)-include := rte_lpm.h + +# this lib needs eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_LPM) += lib/librte_eal lib/librte_malloc + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c new file mode 100644 index 0000000000..4269b3c8d8 --- /dev/null +++ b/lib/librte_lpm/rte_lpm.c @@ -0,0 +1,971 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include /* for definition of CACHE_LINE_SIZE */ +#include +#include +#include +#include +#include +#include +#include + +#include "rte_lpm.h" + +TAILQ_HEAD(rte_lpm_list, rte_lpm); + +/* global list of ring (used for debug/dump) */ +static struct rte_lpm_list *lpm_list; + +#define CHECK_LPM_LIST_CREATED() do { \ + if (lpm_list == NULL) \ + if ((lpm_list = RTE_TAILQ_RESERVE("RTE_LPM", rte_lpm_list)) == NULL){ \ + rte_errno = E_RTE_NO_TAILQ; \ + return NULL; \ + } \ +} while (0) + +#define MAX_DEPTH_TBL24 24 + +enum valid_flag { + INVALID = 0, + VALID +}; + +/* Macro to enable/disable run-time checks. */ +#if defined(RTE_LIBRTE_LPM_DEBUG) +#include +#define VERIFY_DEPTH(depth) do { \ + if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH)) \ + rte_panic("LPM: Invalid depth (%u) at line %d", depth, __LINE__); \ +} while (0) +#else +#define VERIFY_DEPTH(depth) +#endif + +/* + * Function Name: depth_to_mask + * Usage : Converts a given depth value to its corresponding mask value. + * + * depth (IN) : range = 1 - 32 + * mask (OUT) : 32bit mask + */ +static uint32_t __attribute__((pure)) +depth_to_mask(uint8_t depth) +{ + VERIFY_DEPTH(depth); + + /* To calculate a mask start with a 1 on the left hand side and right + * shift while populating the left hand side with 1's + */ + return (int)0x80000000 >> (depth - 1); +} + +/* + * Function Name: depth_to_range + * Usage : Converts given depth value to its corresponding range value. + * + * (IN) depth + * (OUT) mask + */ +static inline uint32_t __attribute__((pure)) +depth_to_range(uint8_t depth) +{ + VERIFY_DEPTH(depth); + + /* + * Calculate tbl24 range. (Note: 2^depth = 1 << depth) + */ + if (depth <= MAX_DEPTH_TBL24) + return 1 << (MAX_DEPTH_TBL24 - depth); + + /* Else if depth is greater than 24 */ + return (1 << (RTE_LPM_MAX_DEPTH - depth)); +} + +/* + * Find an existing lpm table and return a pointer to it. + */ +struct rte_lpm * +rte_lpm_find_existing(const char *name) +{ + struct rte_lpm *l; + + /* check that we have an initialised tail queue */ + CHECK_LPM_LIST_CREATED(); + + TAILQ_FOREACH(l, lpm_list, next) { + if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0) + break; + } + + if (l == NULL) + rte_errno = ENOENT; + + return l; +} + +/* + * Function Name : rte_lpm_create + * Usage : Allocates memory for LPM object + * + * rte_lpm (RETURN) + */ +struct rte_lpm * +rte_lpm_create(const char *name, int socket_id, int max_rules, + int mem_location) +{ + char mem_name[RTE_LPM_NAMESIZE]; + struct rte_lpm *lpm = NULL; + uint32_t mem_size; + + /* check that we have access to create things in shared memory. 
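+ * Only the primary process may create an LPM object; a secondary process
+ * gets rte_errno set to E_RTE_SECONDARY and should attach to an already
+ * created object via rte_lpm_find_existing() instead.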
*/ + if (rte_eal_process_type() == RTE_PROC_SECONDARY){ + rte_errno = E_RTE_SECONDARY; + return NULL; + } + + /* check that we have an initialised tail queue */ + CHECK_LPM_LIST_CREATED(); + + RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2); + RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2); + + /* Check user arguments. */ + if ((name == NULL) || (socket_id < -1) || (max_rules == 0) || + (mem_location != RTE_LPM_HEAP && + mem_location != RTE_LPM_MEMZONE)){ + rte_errno = EINVAL; + return NULL; + } + + rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name); + + /* + * Pad out max_rules so that each depth is given the same number of + * rules. + */ + if (max_rules % RTE_LPM_MAX_DEPTH) { + max_rules += RTE_LPM_MAX_DEPTH - + (max_rules % RTE_LPM_MAX_DEPTH); + } + + /* Determine the amount of memory to allocate. */ + mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules); + + /* Allocate memory to store the LPM data structures. */ + if (mem_location == RTE_LPM_MEMZONE) { + const struct rte_memzone *mz; + uint32_t mz_flags = 0; + + mz = rte_memzone_reserve(mem_name, mem_size, socket_id, + mz_flags); + if (mz == NULL) { + RTE_LOG(ERR, LPM, "LPM memzone creation failed\n"); + return NULL; + } + + memset(mz->addr, 0, mem_size); + lpm = (struct rte_lpm *) mz->addr; + + } + else { + lpm = (struct rte_lpm *)rte_zmalloc(mem_name, mem_size, + CACHE_LINE_SIZE); + if (lpm == NULL) { + RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); + return NULL; + } + } + + /* Save user arguments. */ + lpm->max_rules_per_depth = max_rules / RTE_LPM_MAX_DEPTH; + rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name); + lpm->mem_location = mem_location; + + TAILQ_INSERT_TAIL(lpm_list, lpm, next); + + return lpm; +} + +/* + * Function Name : free + * Usage: Deallocates memory for given LPM table. + */ +void +rte_lpm_free(struct rte_lpm *lpm) +{ + /* Check user arguments. */ + if (lpm == NULL) + return; + + /* Note: Its is currently not possible to free a memzone. */ + if (lpm->mem_location == RTE_LPM_HEAP){ + TAILQ_REMOVE(lpm_list, lpm, next); + rte_free(lpm); + } +} + +/* + * Function Name: rule_add + * Usage : Adds a rule to the rule table. + * + * NOTE: The rule table is split into 32 groups. Each group contains rules that + * apply to a specific prefix depth (i.e. group 1 contains rules that apply to + * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used + * to refer to depth 1 because even though the depth range is 1 - 32, depths + * are stored in the rule table from 0 - 31. + * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. + */ +static inline int32_t +rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, + uint8_t next_hop) +{ + uint32_t rule_gindex, rule_index, last_rule; + + VERIFY_DEPTH(depth); + + /* rule_gindex stands for rule group index. */ + rule_gindex = ((depth - 1) * lpm->max_rules_per_depth); + /* Initialise rule_index to point to start of rule group. */ + rule_index = rule_gindex; + /* Last rule = Last used rule in this rule group. */ + last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1]; + + /* Scan through rule group to see if rule already exists. */ + for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) { + + /* If rule already exists update its next_hop and return. 
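+ * In that case the index of the existing rule is returned.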
*/ + if (lpm->rules_tbl[rule_index].ip == ip_masked) { + lpm->rules_tbl[rule_index].next_hop = next_hop; + + return rule_index; + } + } + + /* + * If rule does not exist check if there is space to add a new rule to + * this rule group. If there is no space return error. */ + if (lpm->used_rules_at_depth[depth - 1] == lpm->max_rules_per_depth) { + return -ENOSPC; + } + + /* If there is space for the new rule add it. */ + lpm->rules_tbl[rule_index].ip = ip_masked; + lpm->rules_tbl[rule_index].next_hop = next_hop; + + /* Increment the used rules counter for this rule group. */ + lpm->used_rules_at_depth[depth - 1]++; + + return rule_index; +} + +/* + * Function Name: rule_delete + * Usage : Delete a rule from the rule table. + * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. + */ +static inline void +rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth) +{ + uint32_t rule_gindex, last_rule_index; + + VERIFY_DEPTH(depth); + + rule_gindex = ((depth - 1) * lpm->max_rules_per_depth); + last_rule_index = rule_gindex + + (lpm->used_rules_at_depth[depth - 1]) - 1; + /* + * Overwrite redundant rule with last rule in group and decrement rule + * counter. + */ + lpm->rules_tbl[rule_index] = lpm->rules_tbl[last_rule_index]; + lpm->used_rules_at_depth[depth - 1]--; +} + + +/* + * Function Name: rule_find + * Usage : Finds a rule in rule table. + * NOTE: Valid range for depth parameter is 1 .. 32 inclusive. + */ +static inline int32_t +rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth) +{ + uint32_t rule_gindex, last_rule, rule_index; + + VERIFY_DEPTH(depth); + + rule_gindex = ((depth - 1) * lpm->max_rules_per_depth); + last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1]; + + /* Scan used rules at given depth to find rule. */ + for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) { + /* If rule is found return the rule index. */ + if (lpm->rules_tbl[rule_index].ip == ip_masked) + return (rule_index); + } + + /* If rule is not found return -E_RTE_NO_TAILQ. */ + return -E_RTE_NO_TAILQ; +} + +/* + * Function Name: tbl8_alloc + * Usage : Find, clean and allocate a tbl8. + */ +static inline int32_t +tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8) +{ + uint32_t tbl8_gindex; /* tbl8 group index. */ + struct rte_lpm_tbl8_entry *tbl8_entry; + + /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */ + for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS; + tbl8_gindex++) { + tbl8_entry = &tbl8[tbl8_gindex * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES]; + /* If a free tbl8 group is found clean it and set as VALID. */ + if (!tbl8_entry->valid_group) { + memset(&tbl8_entry[0], 0, + RTE_LPM_TBL8_GROUP_NUM_ENTRIES * + sizeof(tbl8_entry[0])); + + tbl8_entry->valid_group = VALID; + + /* Return group index for allocated tbl8 group. */ + return tbl8_gindex; + } + } + + /* If there are no tbl8 groups free then return error. */ + return -ENOSPC; +} + +static inline void +tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start) +{ + /* Set tbl8 group invalid*/ + tbl8[tbl8_group_start].valid_group = INVALID; +} + +static inline int32_t +add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, + uint8_t next_hop) +{ + uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j; + + /* Calculate the index into Table24. */ + tbl24_index = ip >> 8; + tbl24_range = depth_to_range(depth); + + for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { + /* + * For invalid OR valid and non-extended tbl 24 entries set + * entry. 
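+ * (Worked example: for a rule of depth 16, tbl24_range is
+ * 1 << (24 - 16) = 256, so the enclosing loop walks 256 consecutive
+ * tbl24 entries.)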
+ */ + if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 && + lpm->tbl24[i].depth <= depth)) { + + struct rte_lpm_tbl24_entry new_tbl24_entry = { + .valid = VALID, + .ext_entry = 0, + .depth = depth, + { .next_hop = next_hop, } + }; + + /* Setting tbl24 entry in one go to avoid race + * conditions */ + lpm->tbl24[i] = new_tbl24_entry; + + continue; + } + + /* If tbl24 entry is valid and extended calculate the index + * into tbl8. */ + tbl8_index = lpm->tbl24[tbl24_index].tbl8_gindex * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + for (j = tbl8_index; j < tbl8_group_end; j++) { + if (!lpm->tbl8[j].valid || + lpm->tbl8[j].depth <= depth) { + struct rte_lpm_tbl8_entry new_tbl8_entry = { + .valid = VALID, + .valid_group = VALID, + .depth = depth, + .next_hop = next_hop, + }; + + /* + * Setting tbl8 entry in one go to avoid race + * conditions + */ + lpm->tbl8[j] = new_tbl8_entry; + + continue; + } + } + } + + return 0; +} + +static inline int32_t +add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth, + uint8_t next_hop) +{ + uint32_t tbl24_index; + int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index, + tbl8_range, i; + + tbl24_index = (ip_masked >> 8); + tbl8_range = depth_to_range(depth); + + if (!lpm->tbl24[tbl24_index].valid) { + /* Search for a free tbl8 group. */ + tbl8_group_index = tbl8_alloc(lpm->tbl8); + + /* Check tbl8 allocation was successful. */ + if (tbl8_group_index < 0) { + return tbl8_group_index; + } + + /* Find index into tbl8 and range. */ + tbl8_index = (tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES) + + (ip_masked & 0xFF); + + /* Set tbl8 entry. */ + for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { + lpm->tbl8[i].depth = depth; + lpm->tbl8[i].next_hop = next_hop; + lpm->tbl8[i].valid = VALID; + } + + /* + * Update tbl24 entry to point to new tbl8 entry. Note: The + * ext_flag and tbl8_index need to be updated simultaneously, + * so assign whole structure in one go + */ + + struct rte_lpm_tbl24_entry new_tbl24_entry = { + .valid = VALID, + .ext_entry = 1, + .depth = 0, + { .tbl8_gindex = (uint8_t)tbl8_group_index, } + }; + + lpm->tbl24[tbl24_index] = new_tbl24_entry; + + }/* If valid entry but not extended calculate the index into Table8. */ + else if (lpm->tbl24[tbl24_index].ext_entry == 0) { + /* Search for free tbl8 group. */ + tbl8_group_index = tbl8_alloc(lpm->tbl8); + + if (tbl8_group_index < 0) { + return tbl8_group_index; + } + + tbl8_group_start = tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_group_end = tbl8_group_start + + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + /* Populate new tbl8 with tbl24 value. */ + for (i = tbl8_group_start; i < tbl8_group_end; i++) { + lpm->tbl8[i].valid = VALID; + lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth; + lpm->tbl8[i].next_hop = + lpm->tbl24[tbl24_index].next_hop; + } + + tbl8_index = tbl8_group_start + (ip_masked & 0xFF); + + /* Insert new rule into the tbl8 entry. */ + for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) { + if (!lpm->tbl8[i].valid || + lpm->tbl8[i].depth <= depth) { + lpm->tbl8[i].valid = VALID; + lpm->tbl8[i].depth = depth; + lpm->tbl8[i].next_hop = next_hop; + + continue; + } + } + + /* + * Update tbl24 entry to point to new tbl8 entry. Note: The + * ext_flag and tbl8_index need to be updated simultaneously, + * so assign whole structure in one go. 
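+ * (A tbl24 entry is only two bytes wide, as enforced by the
+ * RTE_BUILD_BUG_ON() checks in rte_lpm_create(), so the structure
+ * assignment below replaces the whole entry in one step rather than
+ * field by field.)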
+ */ + + struct rte_lpm_tbl24_entry new_tbl24_entry = { + .valid = VALID, + .ext_entry = 1, + .depth = 0, + { .tbl8_gindex = (uint8_t)tbl8_group_index, } + }; + + lpm->tbl24[tbl24_index] = new_tbl24_entry; + + } + else { /* + * If it is valid, extended entry calculate the index into tbl8. + */ + tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex; + tbl8_group_start = tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_index = tbl8_group_start + (ip_masked & 0xFF); + + for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { + + if (!lpm->tbl8[i].valid || + lpm->tbl8[i].depth <= depth) { + struct rte_lpm_tbl8_entry new_tbl8_entry = { + .valid = VALID, + .depth = depth, + .next_hop = next_hop, + }; + + /* + * Setting tbl8 entry in one go to avoid race + * condition + */ + lpm->tbl8[i] = new_tbl8_entry; + + continue; + } + } + } + + return 0; +} + +/* + * Function Name : rte_lpm_add + * Usage : Add a route + * + *(IN) lpm_handle, + *(IN) ip + *(IN) depth + *(IN) next_hop + */ +int +rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, + uint8_t next_hop) +{ + int32_t rule_index, status = 0; + uint32_t ip_masked = (ip & depth_to_mask(depth)); + + /* Check user arguments. */ + if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) + return -EINVAL; + + /* Add the rule to the rule table. */ + rule_index = rule_add(lpm, ip_masked, depth, next_hop); + + /* If the is no space available for new rule return error. */ + if (rule_index < 0) { + return rule_index; + } + + if (depth <= MAX_DEPTH_TBL24) { + status = add_depth_small(lpm, ip_masked, depth, next_hop); + } + else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */ + status = add_depth_big(lpm, ip_masked, depth, next_hop); + + /* + * If add fails due to exhaustion of tbl8 extensions delete + * rule that was added to rule table. + */ + if (status < 0) { + rule_delete(lpm, rule_index, depth); + + return status; + } + } + + return 0; +} + +static inline int32_t +find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) +{ + int32_t rule_index; + uint32_t ip_masked; + uint8_t prev_depth; + + for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) { + ip_masked = ip & depth_to_mask(prev_depth); + + rule_index = rule_find(lpm, ip_masked, prev_depth); + + if (rule_index >= 0) + return rule_index; + } + + return -1; +} + +static inline int32_t +delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked, + uint8_t depth, int32_t sub_rule_index) +{ + uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j; + uint8_t new_depth; + + /* Calculate the range and index into Table24. */ + tbl24_range = depth_to_range(depth); + tbl24_index = (ip_masked >> 8); + + /* + * Firstly check the sub_rule_index. A -1 indicates no replacement rule + * and a positive number indicates a sub_rule_index. + */ + if (sub_rule_index < 0) { + /* + * If no replacement rule exists then invalidate entries + * associated with this rule. + */ + for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { + if (lpm->tbl24[i].ext_entry == 0 && + lpm->tbl24[i].depth <= depth ) { + lpm->tbl24[i].valid = INVALID; + } + else { + /* + * If TBL24 entry is extended, then there has + * to be a rule with depth >= 25 in the + * associated TBL8 group. 
+ */ + tbl8_group_index = lpm->tbl24[i].tbl8_gindex; + tbl8_index = tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + for (j = tbl8_index; j < (tbl8_index + + RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { + + if (lpm->tbl8[j].depth <= depth) + lpm->tbl8[j].valid = INVALID; + } + } + } + } + else { + /* + * If a replacement rule exists then modify entries + * associated with this rule. + */ + + /* Calculate depth of sub_rule. */ + new_depth = (uint8_t) (sub_rule_index / + lpm->max_rules_per_depth); + + struct rte_lpm_tbl24_entry new_tbl24_entry = { + .valid = VALID, + .ext_entry = 0, + .depth = new_depth, + {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,} + }; + + struct rte_lpm_tbl8_entry new_tbl8_entry = { + .valid = VALID, + .depth = new_depth, + .next_hop = lpm->rules_tbl + [sub_rule_index].next_hop, + }; + + for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) { + + if (lpm->tbl24[i].ext_entry == 0 && + lpm->tbl24[i].depth <= depth ) { + lpm->tbl24[i] = new_tbl24_entry; + } + else { + /* + * If TBL24 entry is extended, then there has + * to be a rule with depth >= 25 in the + * associated TBL8 group. + */ + + tbl8_group_index = lpm->tbl24[i].tbl8_gindex; + tbl8_index = tbl8_group_index * + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + for (j = tbl8_index; j < (tbl8_index + + RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) { + + if (lpm->tbl8[j].depth <= depth) + lpm->tbl8[j] = new_tbl8_entry; + } + } + } + } + + return 0; +} + +/* + * Function Name: tbl8_recycle_check + * Usage : Checks if table 8 group can be recycled. + * + * Return of -EEXIST means tbl8 is in use and thus can not be recycled. + * Return of -EINVAL means tbl8 is empty and thus can be recycled + * Return of value > -1 means tbl8 is in use but has all the same values and + * thus can be recycled + */ +static inline int32_t +tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start) +{ + uint32_t tbl8_group_end, i; + tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + + /* + * Check the first entry of the given tbl8. If it is invalid we know + * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH + * (As they would affect all entries in a tbl8) and thus this table + * can not be recycled. + */ + if (tbl8[tbl8_group_start].valid) { + /* + * If first entry is valid check if the depth is less than 24 + * and if so check the rest of the entries to verify that they + * are all of this depth. + */ + if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) { + for (i = (tbl8_group_start + 1); i < tbl8_group_end; + i++) { + + if (tbl8[i].depth != + tbl8[tbl8_group_start].depth) { + + return -EEXIST; + } + } + /* If all entries are the same return the tb8 index */ + return tbl8_group_start; + } + + return -EEXIST; + } + /* + * If the first entry is invalid check if the rest of the entries in + * the tbl8 are invalid. + */ + for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) { + if (tbl8[i].valid) + return -EEXIST; + } + /* If no valid entries are found then return -EINVAL. */ + return -EINVAL; +} + +static inline int32_t +delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, + uint8_t depth, int32_t sub_rule_index) +{ + uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index, + tbl8_range, i; + uint8_t new_depth; + int32_t tbl8_recycle_index; + + /* + * Calculate the index into tbl24 and range. Note: All depths larger + * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry. 
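+ * (A prefix longer than 24 bits covers less than a full /24, so it can
+ * never span more than the single tbl24 entry indexed below.)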
+ */ + tbl24_index = ip_masked >> 8; + + /* Calculate the index into tbl8 and range. */ + tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex; + tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES; + tbl8_index = tbl8_group_start + (ip_masked & 0xFF); + tbl8_range = depth_to_range(depth); + + if (sub_rule_index < 0) { + /* + * Loop through the range of entries on tbl8 for which the + * rule_to_delete must be removed or modified. + */ + for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { + if (lpm->tbl8[i].depth <= depth) + lpm->tbl8[i].valid = INVALID; + } + } + else { + new_depth = (uint8_t)(sub_rule_index / + lpm->max_rules_per_depth); + + /* Set new tbl8 entry. */ + struct rte_lpm_tbl8_entry new_tbl8_entry = { + .valid = VALID, + .depth = new_depth, + .next_hop = lpm->rules_tbl[sub_rule_index].next_hop, + }; + + /* + * Loop through the range of entries on tbl8 for which the + * rule_to_delete must be modified. + */ + for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) { + if (lpm->tbl8[i].depth <= depth) + lpm->tbl8[i] = new_tbl8_entry; + } + } + + /* + * Check if there are any valid entries in this tbl8 group. If all + * tbl8 entries are invalid we can free the tbl8 and invalidate the + * associated tbl24 entry. + */ + + tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start); + + if (tbl8_recycle_index == -EINVAL){ + /* Set tbl24 before freeing tbl8 to avoid race condition. */ + lpm->tbl24[tbl24_index].valid = 0; + tbl8_free(lpm->tbl8, tbl8_group_start); + } + else if (tbl8_recycle_index > -1) { + /* Update tbl24 entry. */ + struct rte_lpm_tbl24_entry new_tbl24_entry = { + .valid = VALID, + .ext_entry = 0, + .depth = lpm->tbl8[tbl8_recycle_index].depth, + { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, } + }; + + /* Set tbl24 before freeing tbl8 to avoid race condition. */ + lpm->tbl24[tbl24_index] = new_tbl24_entry; + tbl8_free(lpm->tbl8, tbl8_group_start); + } + + return 0; +} + +/* + * Function Name: rte_lpm_delete + * Usage : Deletes a rule + * + *(IN) lpm_handle, + *(IN) ip + *(IN) depth + */ +int +rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth) +{ + int32_t rule_to_delete_index, sub_rule_index; + uint32_t ip_masked; + /* + * Check input arguments. Note: IP must be a positive integer of 32 + * bits in length therefore it need not be checked. + */ + if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) { + return -EINVAL; + } + + ip_masked = ip & depth_to_mask(depth); + + /* + * Find the index of the input rule, that needs to be deleted, in the + * rule table. + */ + rule_to_delete_index = rule_find(lpm, ip_masked, depth); + + /* + * Check if rule_to_delete_index was found. If no rule was found the + * function rule_find returns -E_RTE_NO_TAILQ. + */ + if (rule_to_delete_index < 0) + return -E_RTE_NO_TAILQ; + + /* Delete the rule from the rule table. */ + rule_delete(lpm, rule_to_delete_index, depth); + + /* + * Find rule to replace the rule_to_delete. If there is no rule to + * replace the rule_to_delete we return -1 and invalidate the table + * entries associated with this rule. + */ + sub_rule_index = find_previous_rule(lpm, ip, depth); + + /* + * If the input depth value is less than 25 use function + * delete_depth_small otherwise use delete_depth_big. 
+ */ + if (depth <= MAX_DEPTH_TBL24) { + return delete_depth_small(lpm, ip_masked, depth, + sub_rule_index); + } + else { /* If depth > MAX_DEPTH_TBL24 */ + return delete_depth_big(lpm, ip_masked, depth, sub_rule_index); + } +} + +/* + * Function Name: rte_lpm_delete_all + * Usage : Delete all rules from the LPM table. + * + *(IN) lpm_handle + */ +void +rte_lpm_delete_all(struct rte_lpm *lpm) +{ + /* Zero used rules counter. */ + memset(lpm->used_rules_at_depth, 0, sizeof(lpm->used_rules_at_depth)); + + /* Zero tbl24. */ + memset(lpm->tbl24, 0, sizeof(lpm->tbl24)); + + /* Zero tbl8. */ + memset(lpm->tbl8, 0, sizeof(lpm->tbl8)); + + /* Delete all rules form the rules table. */ + memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * + (lpm->max_rules_per_depth * RTE_LPM_MAX_DEPTH)); +} + diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h new file mode 100644 index 0000000000..e74d70e6d8 --- /dev/null +++ b/lib/librte_lpm/rte_lpm.h @@ -0,0 +1,288 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_LPM_H_ +#define _RTE_LPM_H_ + +/** + * @file + * RTE Longest Prefix Match (LPM) + */ + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** Max number of characters in LPM name. */ +#define RTE_LPM_NAMESIZE 32 + +/** Possible location to allocate memory. */ +#define RTE_LPM_HEAP 0 + +/** Possible location to allocate memory. */ +#define RTE_LPM_MEMZONE 1 + +/** Maximum depth value possible for IPv4 LPM. */ +#define RTE_LPM_MAX_DEPTH 32 + +/** Total number of tbl24 entries. */ +#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24) + +/** Number of entries in a tbl8 group. */ +#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256 + +/** Total number of tbl8 groups in the tbl8. */ +#define RTE_LPM_TBL8_NUM_GROUPS 256 + +/** Total number of tbl8 entries. 
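+ * With RTE_LPM_TBL8_NUM_GROUPS = 256 and RTE_LPM_TBL8_GROUP_NUM_ENTRIES = 256
+ * this works out to 65536 entries in total.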
*/ +#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS * \ + RTE_LPM_TBL8_GROUP_NUM_ENTRIES) + +/** Macro to enable/disable run-time checks. */ +#if defined(RTE_LIBRTE_LPM_DEBUG) +#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do { \ + if (cond) return (retval); \ +} while (0) +#else +#define RTE_LPM_RETURN_IF_TRUE(cond, retval) +#endif + +/** Tbl24 entry structure. */ +struct rte_lpm_tbl24_entry { + /* Using single uint8_t to store 3 values. */ + uint8_t valid :1; /**< Validation flag. */ + uint8_t ext_entry :1; /**< External entry. */ + uint8_t depth :6; /**< Rule depth. */ + /* Stores Next hop or group index (i.e. gindex)into tbl8. */ + union { + uint8_t next_hop; + uint8_t tbl8_gindex; + }; +}; + +/** Tbl8 entry structure. */ +struct rte_lpm_tbl8_entry { + /* Using single uint8_t to store 3 values. */ + uint8_t valid :1; /**< Validation flag. */ + uint8_t valid_group :1; /**< Group validation flag. */ + uint8_t depth :6; /**< Rule depth. */ + uint8_t next_hop; /**< next hop. */ +}; + +/** Rule structure. */ +struct rte_lpm_rule { + uint32_t ip; /**< Rule IP address. */ + uint8_t next_hop; /**< Rule next hop. */ +}; + +/** LPM structure. */ +struct rte_lpm { + TAILQ_ENTRY(rte_lpm) next; /**< Next in list. */ + + /* LPM metadata. */ + char name[RTE_LPM_NAMESIZE]; /**< Name of the lpm. */ + int mem_location; /**< Location of memory to be allocated. */ + uint32_t max_rules_per_depth; /**< Max. balanced rules per lpm. */ + uint32_t used_rules_at_depth[RTE_LPM_MAX_DEPTH]; /**< Rules / depth. */ + + /* LPM Tables. */ + struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \ + __rte_cache_aligned; /**< LPM tbl24 table. */ + struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \ + __rte_cache_aligned; /**< LPM tbl8 table. */ + struct rte_lpm_rule rules_tbl[0] \ + __rte_cache_aligned; /**< LPM rules. */ +}; + +/** + * Create an LPM object. + * + * @param name + * LPM object name + * @param socket_id + * NUMA socket ID for LPM table memory allocation + * @param max_rules + * Maximum number of LPM rules that can be added + * @param mem_location + * Location of memory to be allocated. Can only be RTE_LPM_HEAP or + * RTE_LPM_MEMZONE + * @return + * Handle to LPM object on success, NULL otherwise with rte_errno set + * to an appropriate values. Possible rte_errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - E_RTE_NO_TAILQ - no tailq list could be got for the lpm object list + * - EINVAL - invalid parameter passed to function + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + */ +struct rte_lpm * +rte_lpm_create(const char *name, int socket_id, int max_rules, + int mem_location); + +/** + * Find an existing LPM object and return a pointer to it. + * + * @param name + * Name of the lpm object as passed to rte_lpm_create() + * @return + * Pointer to lpm object or NULL if object not found with rte_errno + * set appropriately. Possible rte_errno values include: + * - ENOENT - required entry not available to return. + */ +struct rte_lpm * +rte_lpm_find_existing(const char *name); + +/** + * Free an LPM object. + * + * @param lpm + * LPM object handle + * @return + * None + */ +void +rte_lpm_free(struct rte_lpm *lpm); + +/** + * Add a rule to the LPM table. 
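+ * Overlapping rules of different depths may coexist; rte_lpm_lookup() then
+ * returns the next hop of the most specific (longest matching) prefix.
+ *
+ * Purely for illustration (lpm, nh and the addresses/next hops below are
+ * made up; nh is a uint8_t):
+ *   rte_lpm_add(lpm, 0x0a010000, 16, 1);   adds 10.1.0.0/16 -> next hop 1
+ *   rte_lpm_add(lpm, 0x0a010100, 24, 3);   adds 10.1.1.0/24 -> next hop 3
+ *   rte_lpm_lookup(lpm, 0x0a010105, &nh);  10.1.1.5 hits the /24, nh == 3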
+ * + * @param lpm + * LPM object handle + * @param ip + * IP of the rule to be added to the LPM table + * @param depth + * Depth of the rule to be added to the LPM table + * @param next_hop + * Next hop of the rule to be added to the LPM table + * @return + * 0 on success, negative value otherwise + */ +int +rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop); + +/** + * Delete a rule from the LPM table. + * + * @param lpm + * LPM object handle + * @param ip + * IP of the rule to be deleted from the LPM table + * @param depth + * Depth of the rule to be deleted from the LPM table + * @return + * 0 on success, negative value otherwise + */ +int +rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth); + +/** + * Delete all rules from the LPM table. + * + * @param lpm + * LPM object handle + */ +void +rte_lpm_delete_all(struct rte_lpm *lpm); + +/** + * Lookup an IP into the LPM table. + * + * @param lpm + * LPM object handle + * @param ip + * IP to be looked up in the LPM table + * @param next_hop + * Next hop of the most specific rule found for IP (valid on lookup hit only) + * @return + * -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit + */ +static inline int +rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop) +{ + uint32_t tbl24_index, tbl8_group_index, tbl8_index; + + /* DEBUG: Check user input arguments. */ + RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL); + + /* Calculate index into tbl24. */ + tbl24_index = (ip >> 8); + + /* + * Use the tbl24_index to access the required tbl24 entry then check if + * the tbl24 entry is INVALID, if so return -ENOENT. + */ + if (!lpm->tbl24[tbl24_index].valid){ + return -ENOENT; /* Lookup miss. */ + } + /* + * If tbl24 entry is valid check if it is NOT extended (i.e. it does + * not use a tbl8 extension) if so return the next hop. + */ + if (likely(lpm->tbl24[tbl24_index].ext_entry == 0)) { + *next_hop = lpm->tbl24[tbl24_index].next_hop; + return 0; /* Lookup hit. */ + } + + /* + * If tbl24 entry is valid and extended calculate the index into the + * tbl8 entry. + */ + tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex; + tbl8_index = (tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES) + + (ip & 0xFF); + + /* Check if the tbl8 entry is invalid and if so return -ENOENT. */ + if (!lpm->tbl8[tbl8_index].valid) + return -ENOENT;/* Lookup miss. */ + + /* If the tbl8 entry is valid return return the next_hop. */ + *next_hop = lpm->tbl8[tbl8_index].next_hop; + return 0; /* Lookup hit. */ +} + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_LPM_H_ */ diff --git a/lib/librte_malloc/Makefile b/lib/librte_malloc/Makefile new file mode 100644 index 0000000000..8518c3b673 --- /dev/null +++ b/lib/librte_malloc/Makefile @@ -0,0 +1,50 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_malloc.a + +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_MALLOC) := rte_malloc.c malloc_elem.c malloc_heap.c + +# install includes +SYMLINK-$(CONFIG_RTE_LIBRTE_MALLOC)-include := rte_malloc.h + +# this lib needs eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_MALLOC) += lib/librte_eal + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_malloc/malloc_elem.c b/lib/librte_malloc/malloc_elem.c new file mode 100644 index 0000000000..1c90908867 --- /dev/null +++ b/lib/librte_malloc/malloc_elem.c @@ -0,0 +1,280 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "malloc_elem.h" +#include "malloc_heap.h" + +#define MIN_DATA_SIZE (CACHE_LINE_SIZE * 2) + +/* + * initialise a general malloc_elem header structure + */ +void +malloc_elem_init(struct malloc_elem *elem, + struct malloc_heap *heap, size_t size) +{ + elem->heap = heap; + elem->prev = elem->next_free = NULL; + elem->state = ELEM_FREE; + elem->size = size; + elem->pad = 0; + set_header(elem); + set_trailer(elem); +} + +/* + * initialise a dummy malloc_elem header for the end-of-memzone marker + */ +void +malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev) +{ + malloc_elem_init(elem, prev->heap, 0); + elem->prev = prev; + elem->state = ELEM_BUSY; /* mark busy so its never merged */ +} + +/* + * calculate the starting point of where data of the requested size + * and alignment would fit in the current element. If the data doesn't + * fit, return NULL. + */ +static void * +elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align) +{ + const uintptr_t end_pt = (uintptr_t)elem + + elem->size - MALLOC_ELEM_TRAILER_LEN; + const uintptr_t new_data_start = rte_align_floor_int((end_pt - size),align); + const uintptr_t new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN; + + /* if the new start point is before the exist start, it won't fit */ + return (new_elem_start < (uintptr_t)elem) ? NULL : (void *)new_elem_start; +} + +/* + * use elem_start_pt to determine if we get meet the size and + * alignment request from the current element + */ +int +malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align) +{ + return elem_start_pt(elem, size, align) != NULL; +} + +/* + * split an existing element into two smaller elements at the given + * split_pt parameter. + */ +static void +split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt) +{ + struct malloc_elem *next_elem = RTE_PTR_ADD(elem, elem->size); + const unsigned old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem; + const unsigned new_elem_size = elem->size - old_elem_size; + + malloc_elem_init(split_pt, elem->heap, new_elem_size); + split_pt->prev = elem; + next_elem->prev = split_pt; + elem->size = old_elem_size; + set_trailer(elem); +} + +/* + * reserve a block of data in an existing malloc_elem. If the malloc_elem + * is much larger than the data block requested, we split the element in two. + * This function is only called from malloc_heap_alloc so parameter checking + * is not done here, as it's done there previously. 
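+ * When the element is split, the data block is carved from its end (see
+ * elem_start_pt() above), so the leftover free space stays at the start of
+ * the original element and the free list does not need to be changed.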
+ */ +struct malloc_elem * +malloc_elem_alloc(struct malloc_elem *elem, size_t size, + unsigned align, struct malloc_elem *prev_free) +{ + struct malloc_elem *new_elem = elem_start_pt(elem, size, align); + const unsigned old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem; + + if (old_elem_size <= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE){ + /* don't split it, pad the element instead */ + elem->state = ELEM_BUSY; + elem->pad = old_elem_size; + + /* put a dummy header in padding, to point to real element header */ + if (elem->pad > 0){ /* pad will be at least 64-bytes, as everything + * is cache-line aligned */ + new_elem->pad = elem->pad; + new_elem->state = ELEM_PAD; + new_elem->size = elem->size - elem->pad; + set_header(new_elem); + } + /* remove element from free list */ + if (prev_free == NULL) + elem->heap->free_head = elem->next_free; + else + prev_free->next_free = elem->next_free; + + return new_elem; + } + + /* we are going to split the element in two. The original element + * remains free, and the new element is the one allocated, so no free list + * changes need to be made. + */ + split_elem(elem, new_elem); + new_elem->state = ELEM_BUSY; + + return new_elem; +} + +/* + * joing two struct malloc_elem together. elem1 and elem2 must + * be contiguous in memory. + */ +static inline void +join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2) +{ + struct malloc_elem *next = RTE_PTR_ADD(elem2, elem2->size); + elem1->size += elem2->size; + next->prev = elem1; +} + +/* + * scan the free list, and remove the request element from that + * free list. (Free list to scan is got from heap pointer in element) + */ +static inline void +remove_from_free_list(struct malloc_elem *elem) +{ + if (elem == elem->heap->free_head) + elem->heap->free_head = elem->next_free; + else{ + struct malloc_elem *prev_free = elem->heap->free_head; + while (prev_free && prev_free->next_free != elem) + prev_free = prev_free->next_free; + if (!prev_free) + rte_panic("Corrupted free list\n"); + prev_free->next_free = elem->next_free; + } +} + +/* + * free a malloc_elem block by adding it to the free list. If the + * blocks either immediately before or immediately after newly freed block + * are also free, the blocks are merged together. + */ +int +malloc_elem_free(struct malloc_elem *elem) +{ + if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY) + return -1; + + rte_spinlock_lock(&(elem->heap->lock)); + struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size); + if (next->state == ELEM_FREE){ + /* join to this one, and remove from free list */ + join_elem(elem, next); + remove_from_free_list(next); + } + + /* check if previous element is free, if so join with it and return, + * no need to update free list, as that element is already there + */ + if (elem->prev != NULL && elem->prev->state == ELEM_FREE) + join_elem(elem->prev, elem); + /* otherwise add ourselves to the free list */ + else { + elem->next_free = elem->heap->free_head; + elem->heap->free_head = elem; + elem->state = ELEM_FREE; + elem->pad = 0; + } + rte_spinlock_unlock(&(elem->heap->lock)); + return 0; +} + +/* + * attempt to resize a malloc_elem by expanding into any free space + * immediately after it in memory. 
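+ * Returns 0 on success and -1 if the block cannot be grown in place, in
+ * which case the caller (rte_realloc) falls back to allocating a new block
+ * and copying the data over.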
+ */ +int +malloc_elem_resize(struct malloc_elem *elem, size_t size) +{ + const size_t new_size = size + MALLOC_ELEM_OVERHEAD; + /* if we request a smaller size, then always return ok */ + const size_t current_size = elem->size - elem->pad; + if (current_size >= new_size) + return 0; + + struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size); + rte_spinlock_lock(&elem->heap->lock); + if (next ->state != ELEM_FREE) + goto err_return; + if (current_size + next->size < new_size) + goto err_return; + + /* we now know the element fits, so join the two, then remove from free + * list + */ + join_elem(elem, next); + remove_from_free_list(next); + + if (elem->size - new_size > MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD){ + /* now we have a big block together. Lets cut it down a bit, by splitting */ + struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size); + split_pt = RTE_ALIGN_CEIL(split_pt, CACHE_LINE_SIZE); + split_elem(elem, split_pt); + split_pt->state = ELEM_FREE; + split_pt->next_free = elem->heap->free_head; + elem->heap->free_head = split_pt; + } + rte_spinlock_unlock(&elem->heap->lock); + return 0; + +err_return: + rte_spinlock_unlock(&elem->heap->lock); + return -1; +} diff --git a/lib/librte_malloc/malloc_elem.h b/lib/librte_malloc/malloc_elem.h new file mode 100644 index 0000000000..4328c37f53 --- /dev/null +++ b/lib/librte_malloc/malloc_elem.h @@ -0,0 +1,177 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef MALLOC_ELEM_H_ +#define MALLOC_ELEM_H_ + +/* dummy definition of struct so we can use pointers to it in malloc_elem struct */ +struct malloc_heap; + +enum elem_state { + ELEM_FREE = 0, + ELEM_BUSY, + ELEM_PAD /* element is a padding-only header */ +}; + +struct malloc_elem { + struct malloc_heap *heap; + struct malloc_elem *volatile prev; /* points to prev elem in memzone */ + struct malloc_elem *volatile next_free; /* to make list of free elements */ + volatile enum elem_state state; + uint32_t pad; + volatile size_t size; +#ifdef RTE_LIBRTE_MALLOC_DEBUG + uint64_t header_cookie; /* Cookie marking start of data */ + /* trailer cookie at start + size */ +#endif +} __rte_cache_aligned; + +#ifndef RTE_LIBRTE_MALLOC_DEBUG +static const unsigned MALLOC_ELEM_TRAILER_LEN = 0; + +/* dummy function - just check if pointer is non-null */ +static inline int +malloc_elem_cookies_ok(struct malloc_elem *elem){ return elem != NULL; } + +/* dummy function - no header if malloc_debug is not enabled */ +static inline void +set_header(struct malloc_elem *elem __rte_unused){ } + +/* dummy function - no trailer if malloc_debug is not enabled */ +static inline void +set_trailer(struct malloc_elem *elem __rte_unused){ } + + +#else +static const unsigned MALLOC_ELEM_TRAILER_LEN = CACHE_LINE_SIZE; + +#define MALLOC_HEADER_COOKIE 0xbadbadbadadd2e55ULL /**< Header cookie. */ +#define MALLOC_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/ + +/* define macros to make referencing the header and trailer cookies easier */ +#define MALLOC_ELEM_TRAILER(elem) (*((uint64_t*)RTE_PTR_ADD(elem, \ + elem->size - MALLOC_ELEM_TRAILER_LEN))) +#define MALLOC_ELEM_HEADER(elem) (elem->header_cookie) + +static inline void +set_header(struct malloc_elem *elem) +{ + if (elem != NULL) + MALLOC_ELEM_HEADER(elem) = MALLOC_HEADER_COOKIE; +} + +static inline void +set_trailer(struct malloc_elem *elem) +{ + if (elem != NULL) + MALLOC_ELEM_TRAILER(elem) = MALLOC_TRAILER_COOKIE; +} + +/* check that the header and trailer cookies are set correctly */ +static inline int +malloc_elem_cookies_ok(struct malloc_elem *elem) +{ + return (elem != NULL && + MALLOC_ELEM_HEADER(elem) == MALLOC_HEADER_COOKIE && + MALLOC_ELEM_TRAILER(elem) == MALLOC_TRAILER_COOKIE); +} + +#endif + +static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem); +#define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN) + +/* + * Given a pointer to the start of a memory block returned by malloc, get + * the actual malloc_elem header for that block. + */ +static inline struct malloc_elem * +malloc_elem_from_data(void *data) +{ + if (data == NULL) + return NULL; + + struct malloc_elem *elem = RTE_PTR_SUB(data, MALLOC_ELEM_HEADER_LEN); + if (!malloc_elem_cookies_ok(elem)) + return NULL; + return elem->state != ELEM_PAD ? elem: RTE_PTR_SUB(elem, elem->pad); +} + +/* + * initialise a malloc_elem header + */ +void +malloc_elem_init(struct malloc_elem *elem, + struct malloc_heap *heap, + size_t size); + +/* + * initialise a dummy malloc_elem header for the end-of-memzone marker + */ +void +malloc_elem_mkend(struct malloc_elem *elem, + struct malloc_elem *prev_free); + +/* + * return true if the current malloc_elem can hold a block of data + * of the requested size and with the requested alignment + */ +int +malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align); + +/* + * reserve a block of data in an existing malloc_elem. 
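+ * The caller (malloc_heap_alloc) is expected to hold the heap lock.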
If the malloc_elem + * is much larger than the data block requested, we split the element in two. + */ +struct malloc_elem * +malloc_elem_alloc(struct malloc_elem *elem, size_t size, + unsigned align, struct malloc_elem *prev_free); + +/* + * free a malloc_elem block by adding it to the free list. If the + * blocks either immediately before or immediately after newly freed block + * are also free, the blocks are merged together. + */ +int +malloc_elem_free(struct malloc_elem *elem); + +/* + * attempt to resize a malloc_elem by expanding into any free space + * immediately after it in memory. + */ +int +malloc_elem_resize(struct malloc_elem *elem, size_t size); + +#endif /* MALLOC_ELEM_H_ */ diff --git a/lib/librte_malloc/malloc_heap.c b/lib/librte_malloc/malloc_heap.c new file mode 100644 index 0000000000..3f621ab86a --- /dev/null +++ b/lib/librte_malloc/malloc_heap.c @@ -0,0 +1,181 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "malloc_elem.h" +#include "malloc_heap.h" + +#define QUOTE_(x) #x +#define QUOTE(x) QUOTE_(x) +/* since the memzone size starts with a digit, it will appear unquoted in + * rte_config.h, so quote it so it can be passed to rte_str_to_size */ +#define MALLOC_MEMZONE_SIZE QUOTE(RTE_MALLOC_MEMZONE_SIZE) + +/* + * returns the configuration setting for the memzone size as a size_t value + */ +static inline size_t +get_malloc_memzone_size(void) +{ + return rte_str_to_size(MALLOC_MEMZONE_SIZE); +} + +/* + * reserve an extra memory zone and make it available for use by a particular + * heap. This reserves the zone and sets a dummy malloc_elem header at the end + * to prevent overflow. 
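+ * The zone is sized to at least RTE_MALLOC_MEMZONE_SIZE, and always large
+ * enough to hold the requested allocation plus alignment and element
+ * overhead.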
The rest of the zone is added to free list as a single + * large free block + */ +static int +malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align) +{ + const unsigned mz_flags = 0; + const size_t min_size = get_malloc_memzone_size(); + /* ensure the data we want to allocate will fit in the memzone */ + size_t mz_size = size + align + MALLOC_ELEM_OVERHEAD * 2; + if (mz_size < min_size) + mz_size = min_size; + + char mz_name[RTE_MEMZONE_NAMESIZE]; + rte_snprintf(mz_name, sizeof(mz_name), "MALLOC_S%u_HEAP_%u", + heap->numa_socket, heap->mz_count++); + const struct rte_memzone *mz = rte_memzone_reserve(mz_name, mz_size, + heap->numa_socket, mz_flags); + if (mz == NULL) + return -1; + + /* allocate the memory block headers, one at end, one at start */ + struct malloc_elem *start_elem = (struct malloc_elem *)mz->addr; + struct malloc_elem *end_elem = RTE_PTR_ADD(mz->addr, + mz_size - MALLOC_ELEM_OVERHEAD); + end_elem = RTE_ALIGN_FLOOR(end_elem, CACHE_LINE_SIZE); + + const unsigned elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem; + malloc_elem_init(start_elem, heap, elem_size); + malloc_elem_mkend(end_elem, start_elem); + + start_elem->next_free = heap->free_head; + heap->free_head = start_elem; + return 0; +} + +/* + * initialise a malloc heap object. The heap is locked with a private + * lock while being initialised. This function should only be called the + * first time a thread calls malloc - if even then, as heaps are per-socket + * not per-thread. + */ +static void +malloc_heap_init(struct malloc_heap *heap) +{ + static rte_spinlock_t init_lock = RTE_SPINLOCK_INITIALIZER; + rte_spinlock_lock(&init_lock); + if (!heap->initialised) { + heap->free_head = NULL; + heap->mz_count = 0; + heap->numa_socket = malloc_get_numa_socket(); + rte_spinlock_init(&heap->lock); + heap->initialised = INITIALISED; + } + rte_spinlock_unlock(&init_lock); +} + +/* + * Iterates through the freelist for a heap to find a free element + * which can store data of the required size and with the requested alignment. + * Returns null on failure, or pointer to element on success, with the pointer + * to the previous element in the list, if any, being returned in a parameter + * (to make removing the element from the free list faster). + */ +static struct malloc_elem * +find_suitable_element(struct malloc_heap *heap, size_t size, + unsigned align, struct malloc_elem **prev) +{ + struct malloc_elem *elem = heap->free_head; + *prev = NULL; + while(elem){ + if (malloc_elem_can_hold(elem, size, align)) + break; + *prev = elem; + elem = elem->next_free; + } + return elem; +} + +/* + * Main function called by malloc to allocate a block of memory from the + * heap. It locks the free list, scans it, and adds a new memzone if the + * scan fails. Once the new memzone is added, it re-scans and should return + * the new element after releasing the lock. + */ +void * +malloc_heap_alloc(struct malloc_heap *heap, + const char *type __attribute__((unused)), size_t size, unsigned align) +{ + if (!heap->initialised) + malloc_heap_init(heap); + + size = CACHE_LINE_ROUNDUP(size); + align = CACHE_LINE_ROUNDUP(align); + rte_spinlock_lock(&heap->lock); + + struct malloc_elem *prev, *elem = find_suitable_element(heap, + size, align, &prev); + if (elem == NULL){ + malloc_heap_add_memzone(heap, size, align); + elem = find_suitable_element(heap, size, align, &prev); + } + if (elem != NULL) + elem = malloc_elem_alloc(elem, size, align, prev); + rte_spinlock_unlock(&heap->lock); + return elem == NULL ? 
NULL : (void *)(&elem[1]); +} diff --git a/lib/librte_malloc/malloc_heap.h b/lib/librte_malloc/malloc_heap.h new file mode 100644 index 0000000000..cf599d90fc --- /dev/null +++ b/lib/librte_malloc/malloc_heap.h @@ -0,0 +1,68 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef MALLOC_HEAP_H_ +#define MALLOC_HEAP_H_ + +enum heap_state { + NOT_INITIALISED = 0, + INITIALISED +}; + +struct malloc_heap { + enum heap_state initialised; + unsigned numa_socket; + volatile unsigned mz_count; + rte_spinlock_t lock; + struct malloc_elem * volatile free_head; +} __rte_cache_aligned; + +#define RTE_MALLOC_SOCKET_DEFAULT 0 + +static inline unsigned +malloc_get_numa_socket(void) +{ + unsigned malloc_socket = RTE_MALLOC_SOCKET_DEFAULT; + #ifdef RTE_MALLOC_PER_NUMA_NODE + malloc_socket = rte_socket_id(); + #endif + return malloc_socket; +} + +void * +malloc_heap_alloc(struct malloc_heap *heap, const char *type, + size_t size, unsigned align); + +#endif /* MALLOC_HEAP_H_ */ diff --git a/lib/librte_malloc/rte_malloc.c b/lib/librte_malloc/rte_malloc.c new file mode 100644 index 0000000000..4549884b12 --- /dev/null +++ b/lib/librte_malloc/rte_malloc.c @@ -0,0 +1,166 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "malloc_elem.h" +#include "malloc_heap.h" + +static struct malloc_heap malloc_heap[RTE_MAX_NUMA_NODES] = { + { .initialised = NOT_INITIALISED } +}; + +/* Free the memory space back to heap */ +void rte_free(void *addr) +{ + if (addr == NULL) return; + if (malloc_elem_free(malloc_elem_from_data(addr)) < 0) + rte_panic("Fatal error: Invalid memory\n"); +} + +/* + * Allocate memory on default heap. + */ +void * +rte_malloc(const char *type, size_t size, unsigned align) +{ + unsigned malloc_socket = malloc_get_numa_socket(); + /* return NULL if size is 0 or alignment is not power-of-2 */ + if (size == 0 || !rte_is_power_of_2(align)) + return NULL; + return malloc_heap_alloc(&malloc_heap[malloc_socket], type, + size, align == 0 ? 1 : align); +} + +/* + * Allocate zero'd memory on default heap. + */ +void * +rte_zmalloc(const char *type, size_t size, unsigned align) +{ + void *ptr = rte_malloc(type, size, align); + + if (ptr != NULL) + memset(ptr, 0, size); + return ptr; +} + +/* + * Allocate zero'd memory on default heap. + */ +void * +rte_calloc(const char *type, size_t num, size_t size, unsigned align) +{ + return rte_zmalloc(type, num * size, align); +} + +/* + * Resize allocated memory. + */ +void * +rte_realloc(void *ptr, size_t size, unsigned align) +{ + if (ptr == NULL) + return rte_malloc(NULL, size, align); + + struct malloc_elem *elem = malloc_elem_from_data(ptr); + if (elem == NULL) + rte_panic("Fatal error: memory corruption detected\n"); + + size = CACHE_LINE_ROUNDUP(size), align = CACHE_LINE_ROUNDUP(align); + /* check alignment matches first, and if ok, see if we can resize block */ + if (RTE_ALIGN(ptr,align) == ptr && + malloc_elem_resize(elem, size) == 0) + return ptr; + + /* either alignment is off, or we have no room to expand, + * so move data. */ + void *new_ptr = rte_malloc(NULL, size, align); + if (new_ptr == NULL) + return NULL; + const unsigned old_size = elem->size - MALLOC_ELEM_OVERHEAD; + rte_memcpy(new_ptr, ptr, old_size < size ? 
old_size : size); + rte_free(ptr); + + return new_ptr; +} + +int +rte_malloc_validate(void *ptr, size_t *size) +{ + struct malloc_elem *elem = malloc_elem_from_data(ptr); + if (!malloc_elem_cookies_ok(elem)) + return -1; + if (size != NULL) + *size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD; + return 0; +} +/* + * TODO: Print stats on memory type. If type is NULL, info on all types is printed + */ +void +rte_malloc_dump_stats(__rte_unused const char *type) +{ + return; +} + +/* + * TODO: Set limit to memory that can be allocated to memory type + */ +int +rte_malloc_set_limit(__rte_unused const char *type, + __rte_unused size_t max) +{ + return 0; +} + diff --git a/lib/librte_malloc/rte_malloc.h b/lib/librte_malloc/rte_malloc.h new file mode 100644 index 0000000000..29cff55501 --- /dev/null +++ b/lib/librte_malloc/rte_malloc.h @@ -0,0 +1,212 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_MALLOC_H_ +#define _RTE_MALLOC_H_ + +/** + * @file + * RTE Malloc. This library provides methods for dynamically allocating memory + * from hugepages. + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * This function allocates memory from the huge-page area of memory. The memory + * is not cleared. + * + * @param type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param size + * Size (in bytes) to be allocated. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). 
+ * - Otherwise, the pointer to the allocated object. + */ +void * +rte_malloc(const char *type, size_t size, unsigned align); + +/** + * Allocate zero'ed memory from the heap. + * + * Equivalent to rte_malloc() except that the memory zone is + * initialised with zeros. + * + * @param type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param size + * Size (in bytes) to be allocated. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must obviously be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the allocated object. + */ +void * +rte_zmalloc(const char *type, size_t size, unsigned align); + +/** + * Replacement function for calloc(), using huge-page memory. Memory area is + * initialised with zeros. + * + * @param type + * A string identifying the type of allocated objects (useful for debug + * purposes, such as identifying the cause of a memory leak). Can be NULL. + * @param num + * Number of elements to be allocated. + * @param size + * Size (in bytes) of a single element. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must obviously be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the allocated object. + */ +void * +rte_calloc(const char *type, size_t num, size_t size, unsigned align); + +/** + * Replacement function for realloc(), using huge-page memory. Reserved area + * memory is resized, preserving contents. + * + * @param ptr + * Pointer to already allocated memory + * @param size + * Size (in bytes) of new area. If this is 0, memory is freed. + * @param align + * If 0, the return is a pointer that is suitably aligned for any kind of + * variable (in the same manner as malloc()). + * Otherwise, the return is a pointer that is a multiple of *align*. In + * this case, it must obviously be a power of two. (Minimum alignment is the + * cacheline size, i.e. 64-bytes) + * @return + * - NULL on error. Not enough memory, or invalid arguments (size is 0, + * align is not a power of two). + * - Otherwise, the pointer to the reallocated memory. + */ +void * +rte_realloc(void *ptr, size_t size, unsigned align); + +/** + * Frees the memory space pointed to by the provided pointer. + * + * This pointer must have been returned by a previous call to + * rte_malloc(), rte_zmalloc(), rte_calloc() or rte_realloc(). The behaviour of + * rte_free() is undefined if the pointer does not match this requirement. + * + * If the pointer is NULL, the function does nothing. + * + * @param ptr + * The pointer to memory to be freed. + */ +void +rte_free(void *ptr); + +/** + * If malloc debug is enabled, check a memory block for header + * and trailer markers to indicate that all is well with the block. + * If size is non-null, also return the size of the block. 
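+ * When malloc debug is not enabled, only a basic non-NULL check of the
+ * block header is performed, so corruption is not detected.
+ *
+ * A short usage sketch (the type string and size are arbitrary):
+ *
+ *   size_t sz;
+ *   char *p = rte_malloc("example", 64, 0);
+ *
+ *   if (p != NULL && rte_malloc_validate(p, &sz) == 0)
+ *       rte_free(p);
+ *
+ * On success sz holds the usable size of the block, which is at least the
+ * 64 bytes requested.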
+ * + * @param ptr + * pointer to the start of a data block, must have been returned + * by a previous call to rte_malloc(), rte_zmalloc(), rte_calloc() + * or rte_realloc() + * @param size + * if non-null, and memory block pointer is valid, returns the size + * of the memory block + * @return + * -1 on error, invalid pointer passed or header and trailer markers + * are missing or corrupted + * 0 on success + */ +int +rte_malloc_validate(void *ptr, size_t *size); + +/** + * Dump statistics. + * + * Dump for the specified type to the console. If the type argument is + * NULL, all memory types will be dumped. + * + * @param type + * A string identifying the type of objects to dump, or NULL + * to dump all objects. + */ +void +rte_malloc_dump_stats(const char *type); + +/** + * Set the maximum amount of allocated memory for this type. + * + * @param type + * A string identifying the type of allocated objects. + * @param max + * The maximum amount of allocated bytes for this type. + * @return + * - 0: Success. + * - (-1): Error. + */ +int +rte_malloc_set_limit(const char *type, size_t max); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MALLOC_H_ */ diff --git a/lib/librte_mbuf/Makefile b/lib/librte_mbuf/Makefile new file mode 100644 index 0000000000..db9dc1f67b --- /dev/null +++ b/lib/librte_mbuf/Makefile @@ -0,0 +1,50 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_mbuf.a + +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c + +# install includes +SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include := rte_mbuf.h + +# this lib needs eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF) += lib/librte_eal lib/librte_mempool + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c new file mode 100644 index 0000000000..d011fda219 --- /dev/null +++ b/lib/librte_mbuf/rte_mbuf.c @@ -0,0 +1,252 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * ctrlmbuf constructor, given as a callback function to + * rte_mempool_create() + */ +void +rte_ctrlmbuf_init(struct rte_mempool *mp, + __attribute__((unused)) void *opaque_arg, + void *_m, + __attribute__((unused)) unsigned i) +{ + struct rte_mbuf *m = _m; + + memset(m, 0, mp->elt_size); + + /* start of buffer is just after mbuf structure */ + m->buf_addr = (char *)m + sizeof(struct rte_mbuf); + m->buf_physaddr = rte_mempool_virt2phy(mp, m) + + sizeof(struct rte_mbuf); + m->buf_len = (uint16_t) (mp->elt_size - sizeof(struct rte_mbuf)); + + /* init some constant fields */ + m->type = RTE_MBUF_CTRL; + m->ctrl.data = (char *)m->buf_addr; + m->pool = (struct rte_mempool *)mp; +} + +/* + * pktmbuf pool constructor, given as a callback function to + * rte_mempool_create() + */ +void +rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg) +{ + struct rte_pktmbuf_pool_private *mbp_priv; + uint16_t roomsz; + + mbp_priv = rte_mempool_get_priv(mp); + roomsz = (uint16_t)(uintptr_t)opaque_arg; + + /* Use default data room size. */ + if (0 == roomsz) + roomsz = 2048 + RTE_PKTMBUF_HEADROOM; + + mbp_priv->mbuf_data_room_size = roomsz; +} + +/* + * pktmbuf constructor, given as a callback function to + * rte_mempool_create(). + * Set the fields of a packet mbuf to their default values. + */ +void +rte_pktmbuf_init(struct rte_mempool *mp, + __attribute__((unused)) void *opaque_arg, + void *_m, + __attribute__((unused)) unsigned i) +{ + struct rte_mbuf *m = _m; + uint32_t buf_len = mp->elt_size - sizeof(struct rte_mbuf); + + RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf)); + + memset(m, 0, mp->elt_size); + + /* start of buffer is just after mbuf structure */ + m->buf_addr = (char *)m + sizeof(struct rte_mbuf); + m->buf_physaddr = rte_mempool_virt2phy(mp, m) + + sizeof(struct rte_mbuf); + m->buf_len = (uint16_t)buf_len; + + /* keep some headroom between start of buffer and data */ + m->pkt.data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len); + + /* init some constant fields */ + m->type = RTE_MBUF_PKT; + m->pool = mp; + m->pkt.nb_segs = 1; + m->pkt.in_port = 0xff; +} + +static void +rte_pktmbuf_hexdump(const void *buf, unsigned int len) +{ + unsigned int i, out, ofs; + const unsigned char *data = buf; +#define LINE_LEN 80 + char line[LINE_LEN]; + + printf(" dump data at 0x%p, len=%u\n", data, len); + ofs = 0; + while (ofs < len) { + out = rte_snprintf(line, LINE_LEN, " %08X", ofs); + for (i = 0; ofs+i < len && i < 16; i++) + out += rte_snprintf(line+out, LINE_LEN - out, " %02X", + data[ofs+i]&0xff); + for (; i <= 16; i++) + out += rte_snprintf(line+out, LINE_LEN - out, " "); + for (i = 0; ofs < len && i < 16; i++, ofs++) { + unsigned char c = data[ofs]; + if (!isascii(c) || !isprint(c)) + c = '.'; + out += rte_snprintf(line+out, LINE_LEN - out, "%c", c); + } + printf("%s\n", line); + } +} + +/* do some sanity checks on a mbuf: panic if it fails */ +void +rte_mbuf_sanity_check(const struct rte_mbuf *m, enum rte_mbuf_type t, + int is_header) +{ + const struct rte_mbuf *m_seg; + unsigned nb_segs; + + if (m == NULL) + rte_panic("mbuf is NULL\n"); + if (m->type != (uint8_t)t) + rte_panic("bad mbuf type\n"); + + /* generic checks */ + if (m->pool == NULL) + rte_panic("bad mbuf 
pool\n"); + if (m->buf_physaddr == 0) + rte_panic("bad phys addr\n"); + if (m->buf_addr == NULL) + rte_panic("bad virt addr\n"); + +#ifdef RTE_MBUF_SCATTER_GATHER + uint16_t cnt = rte_mbuf_refcnt_read(m); + if ((cnt == 0) || (cnt == UINT16_MAX)) + rte_panic("bad ref cnt\n"); +#endif + + /* nothing to check for ctrl messages */ + if (m->type == RTE_MBUF_CTRL) + return; + + /* check pkt consistency */ + else if (m->type == RTE_MBUF_PKT) { + + /* nothing to check for sub-segments */ + if (is_header == 0) + return; + + nb_segs = m->pkt.nb_segs; + m_seg = m; + while (m_seg && nb_segs != 0) { + m_seg = m_seg->pkt.next; + nb_segs --; + } + if (nb_segs != 0) + rte_panic("bad nb_segs\n"); + return; + } + + rte_panic("unknown mbuf type\n"); +} + +/* dump a mbuf on console */ +void +rte_pktmbuf_dump(const struct rte_mbuf *m, unsigned dump_len) +{ + unsigned int len; + unsigned nb_segs; + + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + + printf("dump mbuf at 0x%p, phys=%"PRIx64", buf_len=%u\n", + m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len); + printf(" pkt_len=%"PRIx32", ol_flags=%"PRIx16", nb_segs=%u, " + "in_port=%u\n", m->pkt.pkt_len, m->ol_flags, + (unsigned)m->pkt.nb_segs, (unsigned)m->pkt.in_port); + nb_segs = m->pkt.nb_segs; + + while (m && nb_segs != 0) { + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 0); + + printf(" segment at 0x%p, data=0x%p, data_len=%u\n", + m, m->pkt.data, (unsigned)m->pkt.data_len); + len = dump_len; + if (len > m->pkt.data_len) + len = m->pkt.data_len; + if (len != 0) + rte_pktmbuf_hexdump(m->pkt.data, len); + dump_len -= len; + m = m->pkt.next; + nb_segs --; + } +} diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h new file mode 100644 index 0000000000..5acb6a8d48 --- /dev/null +++ b/lib/librte_mbuf/rte_mbuf.h @@ -0,0 +1,1019 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_MBUF_H_ +#define _RTE_MBUF_H_ + +/** + * @file + * RTE Mbuf + * + * The mbuf library provides the ability to create and destroy buffers + * that may be used by the RTE application to store message + * buffers. The message buffers are stored in a mempool, using the + * RTE mempool library. + * + * This library provide an API to allocate/free mbufs, manipulate + * control message buffer (ctrlmbuf), which are generic message + * buffers, and packet buffers (pktmbuf), which are used to carry + * network packets. + * + * To understand the concepts of packet buffers or mbufs, you + * should read "TCP/IP Illustrated, Volume 2: The Implementation, + * Addison-Wesley, 1995, ISBN 0-201-63354-X from Richard Stevens" + * http://www.kohala.com/start/tcpipiv2.html + * + * The main modification of this implementation is the use of mbuf for + * transports other than packets. mbufs can have other types. + */ + +#include + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * A control message buffer. + */ +struct rte_ctrlmbuf { + void *data; /**< Pointer to data. */ + uint32_t data_len; /**< Length of data. */ +}; + + +/* + * Packet Offload Features Flags. It also carry packet type information. + * Critical resources. Both rx/tx shared these bits. Be cautious on any change + */ +#define PKT_RX_VLAN_PKT 0x0001 /**< RX packet is a 802.1q VLAN packet. */ +#define PKT_RX_RSS_HASH 0x0002 /**< RX packet with RSS hash result. */ +#define PKT_RX_FDIR 0x0004 /**< RX packet with FDIR infos. */ +#define PKT_RX_L4_CKSUM_BAD 0x0008 /**< L4 cksum of RX pkt. is not OK. */ +#define PKT_RX_IP_CKSUM_BAD 0x0010 /**< IP cksum of RX pkt. is not OK. */ +#define PKT_RX_IPV4_HDR 0x0020 /**< RX packet with IPv4 header. */ +#define PKT_RX_IPV4_HDR_EXT 0x0040 /**< RX packet with extended IPv4 header. */ +#define PKT_RX_IPV6_HDR 0x0080 /**< RX packet with IPv6 header. */ +#define PKT_RX_IPV6_HDR_EXT 0x0100 /**< RX packet with extended IPv6 header. */ +#define PKT_RX_IEEE1588_PTP 0x0200 /**< RX IEEE1588 L2 Ethernet PT Packet. */ +#define PKT_RX_IEEE1588_TMST 0x0400 /**< RX IEEE1588 L2/L4 timestamped packet.*/ + +#define PKT_TX_VLAN_PKT 0x0800 /**< TX packet is a 802.1q VLAN packet. */ +#define PKT_TX_IP_CKSUM 0x1000 /**< IP cksum of TX pkt. computed by NIC. */ +/* + * Bit 14~13 used for L4 packet type with checksum enabled. + * 00: Reserved + * 01: TCP checksum + * 10: SCTP checksum + * 11: UDP checksum + */ +#define PKT_TX_L4_MASK 0x6000 /**< Mask bits for L4 checksum offload request. */ +#define PKT_TX_L4_NO_CKSUM 0x0000 /**< Disable L4 cksum of TX pkt. */ +#define PKT_TX_TCP_CKSUM 0x2000 /**< TCP cksum of TX pkt. computed by NIC. */ +#define PKT_TX_SCTP_CKSUM 0x4000 /**< SCTP cksum of TX pkt. computed by NIC. */ +#define PKT_TX_UDP_CKSUM 0x6000 /**< UDP cksum of TX pkt. computed by NIC. */ +/* Bit 15 */ +#define PKT_TX_IEEE1588_TMST 0x8000 /**< TX IEEE1588 packet to timestamp. */ + +/** + * Bit Mask to indicate what bits required for building TX context + */ +#define PKT_TX_OFFLOAD_MASK (PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) + +/* Compare mask for vlan_macip_lens, used for context build up */ +#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */ +#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */ +#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */ +/** MAC+IP length. */ +#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK) + +/** + * A packet message buffer. 
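+ *
+ * It carries the per-segment fields (next, data, data_len) together with the
+ * per-packet fields that are only valid in the first segment (nb_segs,
+ * in_port, pkt_len) and the offload metadata (VLAN tag, header lengths and
+ * RSS/FDIR hash).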
+ */ +struct rte_pktmbuf { + /* valid for any segment */ + struct rte_mbuf *next; /**< Next segment of scattered packet. */ + void* data; /**< Start address of data in segment buffer. */ + uint16_t data_len; /**< Amount of data in segment buffer. */ + + /* these fields are valid for first segment only */ + uint8_t nb_segs; /**< Number of segments. */ + uint8_t in_port; /**< Input port. */ + uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */ + + /* offload features */ + uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order). */ + uint16_t l2_len:7; /**< L2 (MAC) Header Length. */ + uint16_t l3_len:9; /**< L3 (IP) Header Length. */ + union { + uint32_t rss; /**< RSS hash result if RSS enabled */ + struct { + uint16_t hash; + uint16_t id; + } fdir; /**< Filter identifier if FDIR enabled */ + } hash; /**< hash information */ +}; + +/** + * This enum indicates the mbuf type. + */ +enum rte_mbuf_type { + RTE_MBUF_CTRL, /**< Control mbuf. */ + RTE_MBUF_PKT, /**< Packet mbuf. */ +}; + +/** + * The generic rte_mbuf, containing a packet mbuf or a control mbuf. + */ +struct rte_mbuf { + struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */ + void *buf_addr; /**< Virtual address of segment buffer. */ + phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */ + uint16_t buf_len; /**< Length of segment buffer. */ +#ifdef RTE_MBUF_SCATTER_GATHER + /** + * 16-bit Reference counter. + * It should only be accessed using the following functions: + * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and + * rte_mbuf_refcnt_set(). The functionality of these functions (atomic, + * or non-atomic) is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC + * config option. + */ + union { + rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */ + uint16_t refcnt; /**< Non-atomically accessed refcnt */ + }; +#else + uint16_t refcnt_reserved; /**< Do not use this field */ +#endif + uint8_t type; /**< Type of mbuf. */ + uint8_t reserved; /**< Unused field. Required for padding. */ + uint16_t ol_flags; /**< Offload features. */ + + union { + struct rte_ctrlmbuf ctrl; + struct rte_pktmbuf pkt; + }; +} __rte_cache_aligned; + +/** + * Given the buf_addr returns the pointer to corresponding mbuf. + */ +#define RTE_MBUF_FROM_BADDR(ba) (((struct rte_mbuf *)(ba)) - 1) + +/** + * Given the pointer to mbuf returns an address where it's buf_addr + * should point to. + */ +#define RTE_MBUF_TO_BADDR(mb) (((struct rte_mbuf *)(mb)) + 1) + +/** + * Returns TRUE if given mbuf is indirect, or FALSE otherwise. + */ +#define RTE_MBUF_INDIRECT(mb) (RTE_MBUF_FROM_BADDR((mb)->buf_addr) != (mb)) + +/** + * Returns TRUE if given mbuf is direct, or FALSE otherwise. + */ +#define RTE_MBUF_DIRECT(mb) (RTE_MBUF_FROM_BADDR((mb)->buf_addr) == (mb)) + + +/** + * Private data in case of pktmbuf pool. + * + * A structure that contains some pktmbuf_pool-specific data that are + * appended after the mempool structure (in private data). 
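+ *
+ * It is filled in by rte_pktmbuf_pool_init(), which is intended to be passed
+ * as the pool-init callback when the mbuf pool is created. A typical
+ * creation sequence looks like the following sketch (pool size, cache size
+ * and MBUF_SIZE are illustrative values only):
+ *
+ *   struct rte_mempool *mp;
+ *
+ *   mp = rte_mempool_create("mbuf_pool", 8192, MBUF_SIZE, 32,
+ *           sizeof(struct rte_pktmbuf_pool_private),
+ *           rte_pktmbuf_pool_init, NULL,
+ *           rte_pktmbuf_init, NULL,
+ *           rte_socket_id(), 0);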
+ */ +struct rte_pktmbuf_pool_private { + uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf.*/ +}; + +#ifdef RTE_LIBRTE_MBUF_DEBUG + +/** check mbuf type in debug mode */ +#define __rte_mbuf_sanity_check(m, t, is_h) rte_mbuf_sanity_check(m, t, is_h) + +/** check mbuf type in debug mode if mbuf pointer is not null */ +#define __rte_mbuf_sanity_check_raw(m, t, is_h) do { \ + if ((m) != NULL) \ + rte_mbuf_sanity_check(m, t, is_h); \ +} while (0) + +/** MBUF asserts in debug mode */ +#define RTE_MBUF_ASSERT(exp) \ +if (!(exp)) { \ + rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \ +} + +#else /* RTE_LIBRTE_MBUF_DEBUG */ + +/** check mbuf type in debug mode */ +#define __rte_mbuf_sanity_check(m, t, is_h) do { } while(0) + +/** check mbuf type in debug mode if mbuf pointer is not null */ +#define __rte_mbuf_sanity_check_raw(m, t, is_h) do { } while(0) + +/** MBUF asserts in debug mode */ +#define RTE_MBUF_ASSERT(exp) do { } while(0) + +#endif /* RTE_LIBRTE_MBUF_DEBUG */ + +#ifdef RTE_MBUF_SCATTER_GATHER +#ifdef RTE_MBUF_REFCNT_ATOMIC + +/** + * Adds given value to an mbuf's refcnt and returns its new value. + * @param m + * Mbuf to update + * @param value + * Value to add/subtract + * @return + * Updated value + */ +static inline uint16_t +rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) +{ + return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value)); +} + +/** + * Reads the value of an mbuf's refcnt. + * @param m + * Mbuf to read + * @return + * Reference count number. + */ +static inline uint16_t +rte_mbuf_refcnt_read(const struct rte_mbuf *m) +{ + return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic)); +} + +/** + * Sets an mbuf's refcnt to a defined value. + * @param m + * Mbuf to update + * @param new_value + * Value set + */ +static inline void +rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value) +{ + rte_atomic16_set(&m->refcnt_atomic, new_value); +} + +#else /* ! RTE_MBUF_REFCNT_ATOMIC */ + +/** + * Adds given value to an mbuf's refcnt and returns its new value. + */ +static inline uint16_t +rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value) +{ + m->refcnt = (uint16_t)(m->refcnt + value); + return m->refcnt; +} + +/** + * Reads the value of an mbuf's refcnt. + */ +static inline uint16_t +rte_mbuf_refcnt_read(const struct rte_mbuf *m) +{ + return m->refcnt; +} + +/** + * Sets an mbuf's refcnt to the defined value. + */ +static inline void +rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value) +{ + m->refcnt = new_value; +} + +#endif /* RTE_MBUF_REFCNT_ATOMIC */ + +/** Mbuf prefetch */ +#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \ + if ((m) != NULL) \ + rte_prefetch0(m); \ +} while (0) + +#else /* ! RTE_MBUF_SCATTER_GATHER */ + +/** Mbuf prefetch */ +#define RTE_MBUF_PREFETCH_TO_FREE(m) do { } while(0) + +#endif /* RTE_MBUF_SCATTER_GATHER */ + + +/** + * Sanity checks on an mbuf. + * + * Check the consistency of the given mbuf. The function will cause a + * panic if corruption is detected. + * + * @param m + * The mbuf to be checked. + * @param t + * The expected type of the mbuf. + * @param is_header + * True if the mbuf is a packet header, false if it is a sub-segment + * of a packet (in this case, some fields like nb_segs are not checked) + */ +void +rte_mbuf_sanity_check(const struct rte_mbuf *m, enum rte_mbuf_type t, + int is_header); + +/** + * @internal Allocate a new mbuf from mempool *mp*. + * The use of that function is reserved for RTE internal needs. 
+ * Please use either rte_ctrlmbuf_alloc() or rte_pktmbuf_alloc(). + * + * @param mp + * The mempool from which mbuf is allocated. + * @return + * - The pointer to the new mbuf on success. + * - NULL if allocation failed. + */ +static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + void *mb = NULL; + if (rte_mempool_get(mp, &mb) < 0) + return NULL; + m = (struct rte_mbuf *)mb; +#ifdef RTE_MBUF_SCATTER_GATHER + RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0); + rte_mbuf_refcnt_set(m, 1); +#endif /* RTE_MBUF_SCATTER_GATHER */ + return (m); +} + +/** + * @internal Put mbuf back into its original mempool. + * The use of that function is reserved for RTE internal needs. + * Please use either rte_ctrlmbuf_free() or rte_pktmbuf_free(). + * + * @param m + * The mbuf to be freed. + */ +static inline void __rte_mbuf_raw_free(struct rte_mbuf *m) +{ +#ifdef RTE_MBUF_SCATTER_GATHER + RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0); +#endif /* RTE_MBUF_SCATTER_GATHER */ + rte_mempool_put(m->pool, m); +} + +/* Operations on ctrl mbuf */ + +/** + * The control mbuf constructor. + * + * This function initializes some fields in an mbuf structure that are + * not modified by the user once created (mbuf type, origin pool, buffer + * start address, and so on). This function is given as a callback function + * to rte_mempool_create() at pool creation time. + * + * @param mp + * The mempool from which the mbuf is allocated. + * @param opaque_arg + * A pointer that can be used by the user to retrieve useful information + * for mbuf initialization. This pointer comes from the ``init_arg`` + * parameter of rte_mempool_create(). + * @param m + * The mbuf to initialize. + * @param i + * The index of the mbuf in the pool table. + */ +void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg, + void *m, unsigned i); + +/** + * Allocate a new mbuf (type is ctrl) from mempool *mp*. + * + * This new mbuf is initialized with data pointing to the beginning of + * buffer, and with a length of zero. + * + * @param mp + * The mempool from which the mbuf is allocated. + * @return + * - The pointer to the new mbuf on success. + * - NULL if allocation failed. + */ +static inline struct rte_mbuf *rte_ctrlmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + if ((m = __rte_mbuf_raw_alloc(mp)) != NULL) { + m->ctrl.data = m->buf_addr; + m->ctrl.data_len = 0; + __rte_mbuf_sanity_check(m, RTE_MBUF_CTRL, 0); + } + return (m); +} + +/** + * Free a control mbuf back into its original mempool. + * + * @param m + * The control mbuf to be freed. + */ +static inline void rte_ctrlmbuf_free(struct rte_mbuf *m) +{ + __rte_mbuf_sanity_check(m, RTE_MBUF_CTRL, 0); +#ifdef RTE_MBUF_SCATTER_GATHER + if (rte_mbuf_refcnt_update(m, -1) == 0) +#endif /* RTE_MBUF_SCATTER_GATHER */ + __rte_mbuf_raw_free(m); +} + +/** + * A macro that returns the pointer to the carried data. + * + * The value that can be read or assigned. + * + * @param m + * The control mbuf. + */ +#define rte_ctrlmbuf_data(m) ((m)->ctrl.data) + +/** + * A macro that returns the length of the carried data. + * + * The value that can be read or assigned. + * + * @param m + * The control mbuf. + */ +#define rte_ctrlmbuf_len(m) ((m)->ctrl.data_len) + +/* Operations on pkt mbuf */ + +/** + * The packet mbuf constructor. + * + * This function initializes some fields in the mbuf structure that are not + * modified by the user once created (mbuf type, origin pool, buffer start + * address, and so on). 
This function is given as a callback function to + * rte_mempool_create() at pool creation time. + * + * @param mp + * The mempool from which mbufs originate. + * @param opaque_arg + * A pointer that can be used by the user to retrieve useful information + * for mbuf initialization. This pointer comes from the ``init_arg`` + * parameter of rte_mempool_create(). + * @param m + * The mbuf to initialize. + * @param i + * The index of the mbuf in the pool table. + */ +void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, + void *m, unsigned i); + + +/** + * A packet mbuf pool constructor. + * + * This function initializes the mempool private data in the case of a + * pktmbuf pool. This private data is needed by the driver. The + * function is given as a callback function to rte_mempool_create() at + * pool creation. It can be extended by the user, for example, to + * provide another packet size. + * + * @param mp + * The mempool from which mbufs originate. + * @param opaque_arg + * A pointer that can be used by the user to retrieve useful information + * for mbuf initialization. This pointer comes from the ``init_arg`` + * parameter of rte_mempool_create(). + */ +void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg); + +/** + * Reset the fields of a packet mbuf to their default values. + * + * The given mbuf must have only one segment. + * + * @param m + * The packet mbuf to be resetted. + */ +static inline void rte_pktmbuf_reset(struct rte_mbuf *m) +{ + uint32_t buf_ofs; + + m->pkt.next = NULL; + m->pkt.pkt_len = 0; + m->pkt.l2_len = 0; + m->pkt.l3_len = 0; + m->pkt.vlan_tci = 0; + m->pkt.nb_segs = 1; + m->pkt.in_port = 0xff; + + m->ol_flags = 0; + buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ? + RTE_PKTMBUF_HEADROOM : m->buf_len; + m->pkt.data = (char*) m->buf_addr + buf_ofs; + + m->pkt.data_len = 0; + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); +} + +/** + * Allocate a new mbuf (type is pkt) from a mempool. + * + * This new mbuf contains one segment, which has a length of 0. The pointer + * to data is initialized to have some bytes of headroom in the buffer + * (if buffer size allows). + * + * @param mp + * The mempool from which the mbuf is allocated. + * @return + * - The pointer to the new mbuf on success. + * - NULL if allocation failed. + */ +static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + if ((m = __rte_mbuf_raw_alloc(mp)) != NULL) + rte_pktmbuf_reset(m); + return (m); +} + +#ifdef RTE_MBUF_SCATTER_GATHER + +/** + * Attach packet mbuf to another packet mbuf. + * After attachment we refer the mbuf we attached as 'indirect', + * while mbuf we attached to as 'direct'. + * Right now, not supported: + * - attachment to indirect mbuf (e.g. - md has to be direct). + * - attachment for already indirect mbuf (e.g. - mi has to be direct). + * - mbuf we trying to attach (mi) is used by someone else + * e.g. it's reference counter is greater then 1. + * + * @param mi + * The indirect packet mbuf. + * @param md + * The direct packet mbuf. 
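+ *
+ * Illustrative use (names are hypothetical): attach a freshly
+ * allocated mbuf to a direct one so that both share the same buffer:
+ * @code
+ * struct rte_mbuf *mi = rte_pktmbuf_alloc(pool);
+ *
+ * if (mi != NULL)
+ *     rte_pktmbuf_attach(mi, md);
+ * @endcode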
+ */ + +static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md) +{ + RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(md) && + RTE_MBUF_DIRECT(mi) && + rte_mbuf_refcnt_read(mi) == 1); + + rte_mbuf_refcnt_update(md, 1); + mi->buf_physaddr = md->buf_physaddr; + mi->buf_addr = md->buf_addr; + mi->buf_len = md->buf_len; + + mi->pkt = md->pkt; + + mi->pkt.next = NULL; + mi->pkt.pkt_len = mi->pkt.data_len; + mi->pkt.nb_segs = 1; + + __rte_mbuf_sanity_check(mi, RTE_MBUF_PKT, 1); + __rte_mbuf_sanity_check(md, RTE_MBUF_PKT, 0); +} + +/** + * Detach an indirect packet mbuf - + * - restore original mbuf address and length values. + * - reset pktmbuf data and data_len to their default values. + * All other fields of the given packet mbuf will be left intact. + * + * @param m + * The indirect attached packet mbuf. + */ + +static inline void rte_pktmbuf_detach(struct rte_mbuf *m) +{ + const struct rte_mempool *mp = m->pool; + void *buf = RTE_MBUF_TO_BADDR(m); + uint32_t buf_ofs; + uint32_t buf_len = mp->elt_size - sizeof(*m); + m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof (*m); + + m->buf_addr = buf; + m->buf_len = (uint16_t)buf_len; + + buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ? + RTE_PKTMBUF_HEADROOM : m->buf_len; + m->pkt.data = (char*) m->buf_addr + buf_ofs; + + m->pkt.data_len = 0; +} + +#endif /* RTE_MBUF_SCATTER_GATHER */ + +/** + * Free a segment of a packet mbuf into its original mempool. + * + * Free an mbuf, without parsing other segments in case of chained + * buffers. + * + * @param m + * The packet mbuf segment to be freed. + */ +static inline void rte_pktmbuf_free_seg(struct rte_mbuf *m) +{ + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 0); + +#ifdef RTE_MBUF_SCATTER_GATHER + if (likely (rte_mbuf_refcnt_read(m) == 1) || + likely (rte_mbuf_refcnt_update(m, -1) == 0)) { + struct rte_mbuf *md = RTE_MBUF_FROM_BADDR(m->buf_addr); + + rte_mbuf_refcnt_set(m, 0); + + /* if this is an indirect mbuf, then + * - detach mbuf + * - free attached mbuf segment + */ + if (unlikely (md != m)) { + rte_pktmbuf_detach(m); + if (rte_mbuf_refcnt_update(md, -1) == 0) + __rte_mbuf_raw_free(md); + } +#endif + __rte_mbuf_raw_free(m); +#ifdef RTE_MBUF_SCATTER_GATHER + } +#endif +} + +/** + * Free a packet mbuf back into its original mempool. + * + * Free an mbuf, and all its segments in case of chained buffers. Each + * segment is added back into its original mempool. + * + * @param m + * The packet mbuf to be freed. + */ +static inline void rte_pktmbuf_free(struct rte_mbuf *m) +{ + struct rte_mbuf *m_next; + + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + + while (m != NULL) { + m_next = m->pkt.next; + rte_pktmbuf_free_seg(m); + m = m_next; + } +} + +#ifdef RTE_MBUF_SCATTER_GATHER + +/** + * Creates a "clone" of the given packet mbuf. + * + * Walks through all segments of the given packet mbuf, and for each of them: + * - Creates a new packet mbuf from the given pool. + * - Attaches newly created mbuf to the segment. + * Then updates pkt_len and nb_segs of the "clone" packet mbuf to match values + * from the original packet mbuf. + * + * @param md + * The packet mbuf to be cloned. + * @param mp + * The mempool from which the "clone" mbufs are allocated. + * @return + * - The pointer to the new "clone" mbuf on success. + * - NULL if allocation fails. 
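+ *
+ * Illustrative use (names are hypothetical): duplicate a packet so it
+ * can be transmitted twice without copying its payload:
+ * @code
+ * struct rte_mbuf *copy = rte_pktmbuf_clone(m, clone_pool);
+ *
+ * if (copy != NULL)
+ *     rte_eth_tx_burst(port_id, queue_id, &copy, 1);
+ * @endcode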
+ */ +static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md, + struct rte_mempool *mp) +{ + struct rte_mbuf *mc, *mi, **prev; + uint32_t pktlen; + uint8_t nseg; + + if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL)) + return (NULL); + + mi = mc; + prev = &mi->pkt.next; + pktlen = md->pkt.pkt_len; + nseg = 0; + + do { + nseg++; + rte_pktmbuf_attach(mi, md); + *prev = mi; + prev = &mi->pkt.next; + } while ((md = md->pkt.next) != NULL && + (mi = rte_pktmbuf_alloc(mp)) != NULL); + + *prev = NULL; + mc->pkt.nb_segs = nseg; + mc->pkt.pkt_len = pktlen; + + /* Allocation of new indirect segment failed */ + if (unlikely (mi == NULL)) { + rte_pktmbuf_free(mc); + return (NULL); + } + + __rte_mbuf_sanity_check(mc, RTE_MBUF_PKT, 1); + return (mc); +} + +/** + * Adds given value to the refcnt of all packet mbuf segments. + * + * Walks through all segments of given packet mbuf and for each of them + * invokes rte_mbuf_refcnt_update(). + * + * @param m + * The packet mbuf whose refcnt to be updated. + * @param v + * The value to add to the mbuf's segments refcnt. + */ +static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v) +{ + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + + do { + rte_mbuf_refcnt_update(m, v); + } while ((m = m->pkt.next) != NULL); +} + +#endif /* RTE_MBUF_SCATTER_GATHER */ + +/** + * Get the headroom in a packet mbuf. + * + * @param m + * The packet mbuf. + * @return + * The length of the headroom. + */ +static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m) +{ + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + return (uint16_t) ((char*) m->pkt.data - (char*) m->buf_addr); +} + +/** + * Get the tailroom of a packet mbuf. + * + * @param m + * The packet mbuf. + * @return + * The length of the tailroom. + */ +static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m) +{ + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) - + m->pkt.data_len); +} + +/** + * Get the last segment of the packet. + * + * @param m + * The packet mbuf. + * @return + * The last segment of the given mbuf. + */ +static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m) +{ + struct rte_mbuf *m2 = (struct rte_mbuf *)m; + + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + while (m2->pkt.next != NULL) + m2 = m2->pkt.next; + return m2; +} + +/** + * A macro that points to the start of the data in the mbuf. + * + * The returned pointer is cast to type t. Before using this + * function, the user must ensure that m_headlen(m) is large enough to + * read its data. + * + * @param m + * The packet mbuf. + * @param t + * The type to cast the result into. + */ +#define rte_pktmbuf_mtod(m, t) ((t)((m)->pkt.data)) + +/** + * A macro that returns the length of the packet. + * + * The value can be read or assigned. + * + * @param m + * The packet mbuf. + */ +#define rte_pktmbuf_pkt_len(m) ((m)->pkt.pkt_len) + +/** + * A macro that returns the length of the segment. + * + * The value can be read or assigned. + * + * @param m + * The packet mbuf. + */ +#define rte_pktmbuf_data_len(m) ((m)->pkt.data_len) + +/** + * Prepend len bytes to an mbuf data area. + * + * Returns a pointer to the new + * data start address. If there is not enough headroom in the first + * segment, the function will return NULL, without modifying the mbuf. + * + * @param m + * The pkt mbuf. + * @param len + * The amount of data to prepend (in bytes). 
+ * @return + * A pointer to the start of the newly prepended data, or + * NULL if there is not enough headroom space in the first segment + */ +static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m, + uint16_t len) +{ + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + + if (unlikely(len > rte_pktmbuf_headroom(m))) + return NULL; + + m->pkt.data = (char*) m->pkt.data - len; + m->pkt.data_len = (uint16_t)(m->pkt.data_len + len); + m->pkt.pkt_len = (m->pkt.pkt_len + len); + + return (char*) m->pkt.data; +} + +/** + * Append len bytes to an mbuf. + * + * Append len bytes to an mbuf and return a pointer to the start address + * of the added data. If there is not enough tailroom in the last + * segment, the function will return NULL, without modifying the mbuf. + * + * @param m + * The packet mbuf. + * @param len + * The amount of data to append (in bytes). + * @return + * A pointer to the start of the newly appended data, or + * NULL if there is not enough tailroom space in the last segment + */ +static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len) +{ + void *tail; + struct rte_mbuf *m_last; + + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + + m_last = rte_pktmbuf_lastseg(m); + if (unlikely(len > rte_pktmbuf_tailroom(m_last))) + return NULL; + + tail = (char*) m_last->pkt.data + m_last->pkt.data_len; + m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len + len); + m->pkt.pkt_len = (m->pkt.pkt_len + len); + return (char*) tail; +} + +/** + * Remove len bytes at the beginning of an mbuf. + * + * Returns a pointer to the start address of the new data area. If the + * length is greater than the length of the first segment, then the + * function will fail and return NULL, without modifying the mbuf. + * + * @param m + * The packet mbuf. + * @param len + * The amount of data to remove (in bytes). + * @return + * A pointer to the new start of the data. + */ +static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len) +{ + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + + if (unlikely(len > m->pkt.data_len)) + return NULL; + + m->pkt.data_len = (uint16_t)(m->pkt.data_len - len); + m->pkt.data = ((char*) m->pkt.data + len); + m->pkt.pkt_len = (m->pkt.pkt_len - len); + return (char*) m->pkt.data; +} + +/** + * Remove len bytes of data at the end of the mbuf. + * + * If the length is greater than the length of the last segment, the + * function will fail and return -1 without modifying the mbuf. + * + * @param m + * The packet mbuf. + * @param len + * The amount of data to remove (in bytes). + * @return + * - 0: On success. + * - -1: On error. + */ +static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len) +{ + struct rte_mbuf *m_last; + + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + + m_last = rte_pktmbuf_lastseg(m); + if (unlikely(len > m_last->pkt.data_len)) + return -1; + + m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len - len); + m->pkt.pkt_len = (m->pkt.pkt_len - len); + return 0; +} + +/** + * Test if mbuf data is contiguous. + * + * @param m + * The packet mbuf. + * @return + * - 1, if all data is contiguous (one segment). + * - 0, if there is several segments. + */ +static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m) +{ + __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1); + return !!(m->pkt.nb_segs == 1); +} + +/** + * Dump an mbuf structure to the console. + * + * Dump all fields for the given packet mbuf and all its associated + * segments (in the case of a chained buffer). + * + * @param m + * The packet mbuf. 
+ * @param dump_len + * If dump_len != 0, also dump the "dump_len" first data bytes of + * the packet. + */ +void rte_pktmbuf_dump(const struct rte_mbuf *m, unsigned dump_len); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_MBUF_H_ */ diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile new file mode 100644 index 0000000000..5b3cac075e --- /dev/null +++ b/lib/librte_mempool/Makefile @@ -0,0 +1,50 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_mempool.a + +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) := rte_mempool.c + +# install includes +SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h + +# this lib needs eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += lib/librte_eal lib/librte_ring + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c new file mode 100644 index 0000000000..b0a3c9907b --- /dev/null +++ b/lib/librte_mempool/rte_mempool.c @@ -0,0 +1,491 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_mempool.h" + +TAILQ_HEAD(rte_mempool_list, rte_mempool); + +/* global list of mempool (used for debug/dump) */ +static struct rte_mempool_list *mempool_list; + +/* + * return the greatest common divisor between a and b (fast algorithm) + * + */ +static unsigned get_gcd(unsigned a, unsigned b) +{ + unsigned c; + + if (0 == a) + return b; + if (0 == b) + return a; + + if (a < b) { + c = a; + a = b; + b = c; + } + + while (b != 0) { + c = a % b; + a = b; + b = c; + } + + return a; +} + +/* + * Depending on memory configuration, objects addresses are spreaded + * between channels and ranks in RAM: the pool allocator will add + * padding between objects. This function return the new size of the + * object. 
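+ *
+ * For example, with 64-byte cache lines, a 128-byte object (2 cache
+ * lines) on a machine with 2 channels and 2 ranks is padded up to
+ * 3 cache lines (192 bytes): 3 is coprime with both 2 and 2*2, so
+ * consecutive objects start on different channels and ranks.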
+ */ +static unsigned optimize_object_size(unsigned obj_size) +{ + unsigned nrank, nchan; + unsigned new_obj_size; + + /* get number of channels */ + nchan = rte_memory_get_nchannel(); + if (nchan == 0) + nchan = 1; + + nrank = rte_memory_get_nrank(); + if (nrank == 0) + nrank = 1; + + /* process new object size */ + new_obj_size = (obj_size + CACHE_LINE_MASK) / CACHE_LINE_SIZE; + while (get_gcd(new_obj_size, nrank * nchan) != 1 || + get_gcd(nchan, new_obj_size) != 1) + new_obj_size++; + return new_obj_size * CACHE_LINE_SIZE; +} + +/* create the mempool */ +struct rte_mempool * +rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + char rg_name[RTE_RING_NAMESIZE]; + struct rte_mempool *mp; + struct rte_ring *r; + const struct rte_memzone *mz; + size_t mempool_size; + int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY; + int rg_flags = 0; + uint32_t header_size, trailer_size; + uint32_t total_elt_size; + unsigned i; + void *obj; + + /* compilation-time checks */ + RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) & + CACHE_LINE_MASK) != 0); +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) & + CACHE_LINE_MASK) != 0); + RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) & + CACHE_LINE_MASK) != 0); +#endif +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) & + CACHE_LINE_MASK) != 0); + RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) & + CACHE_LINE_MASK) != 0); +#endif + + /* check that we have an initialised tail queue */ + if (mempool_list == NULL) + if ((mempool_list = RTE_TAILQ_RESERVE("RTE_MEMPOOL", \ + rte_mempool_list)) == NULL){ + rte_errno = E_RTE_NO_TAILQ; + return NULL; + } + + /* asked cache too big */ + if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE){ + rte_errno = EINVAL; + return NULL; + } + + /* "no cache align" imply "no spread" */ + if (flags & MEMPOOL_F_NO_CACHE_ALIGN) + flags |= MEMPOOL_F_NO_SPREAD; + + /* ring flags */ + if (flags & MEMPOOL_F_SP_PUT) + rg_flags |= RING_F_SP_ENQ; + if (flags & MEMPOOL_F_SC_GET) + rg_flags |= RING_F_SC_DEQ; + + /* allocate the ring that will be used to store objects */ + /* Ring functions will return appropriate errors if we are + * running as a secondary process etc., so no checks made + * in this function for that condition */ + rte_snprintf(rg_name, sizeof(rg_name), "MP_%s", name); + r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags); + if (r == NULL) + return NULL; + + /* + * In header, we have at least the pointer to the pool, and + * optionaly a 64 bits cookie. 
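+ *
+ * The resulting layout of one object in the memzone is:
+ *
+ *   | pool ptr | [cookie] | pad |    element    | [cookie] | pad |
+ *   |<-------- header -------->|<-- elt_size -->|<---- trailer -->|
+ *
+ * where the cookies only exist in debug mode and the padding depends
+ * on the MEMPOOL_F_NO_CACHE_ALIGN / MEMPOOL_F_NO_SPREAD flags.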
+ */ + header_size = 0; + header_size += sizeof(struct rte_mempool *); /* ptr to pool */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + header_size += sizeof(uint64_t); /* cookie */ +#endif + if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) + header_size = (header_size + CACHE_LINE_MASK) & (~CACHE_LINE_MASK); + + /* trailer contains the cookie in debug mode */ + trailer_size = 0; +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + trailer_size += sizeof(uint64_t); /* cookie */ +#endif + /* element size is 8 bytes-aligned at least */ + elt_size = (elt_size + 7) & (~7); + + /* expand trailer to next cache line */ + if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) { + total_elt_size = header_size + elt_size + trailer_size; + trailer_size += ((CACHE_LINE_SIZE - + (total_elt_size & CACHE_LINE_MASK)) & + CACHE_LINE_MASK); + } + + /* + * increase trailer to add padding between objects in order to + * spread them accross memory channels/ranks + */ + if ((flags & MEMPOOL_F_NO_SPREAD) == 0) { + unsigned new_size; + new_size = optimize_object_size(header_size + elt_size + + trailer_size); + trailer_size = new_size - header_size - elt_size; + } + + /* this is the size of an object, including header and trailer */ + total_elt_size = header_size + elt_size + trailer_size; + + /* reserve a memory zone for this mempool: private data is + * cache-aligned */ + private_data_size = (private_data_size + + CACHE_LINE_MASK) & (~CACHE_LINE_MASK); + mempool_size = total_elt_size * n + + sizeof(struct rte_mempool) + private_data_size; + rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", name); + mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags); + + /* + * no more memory: in this case we loose previously reserved + * space for the as we cannot free it + */ + if (mz == NULL) + return NULL; + + /* init the mempool structure */ + mp = mz->addr; + memset(mp, 0, sizeof(*mp)); + rte_snprintf(mp->name, sizeof(mp->name), "%s", name); + mp->phys_addr = mz->phys_addr; + mp->ring = r; + mp->size = n; + mp->flags = flags; + mp->bulk_default = 1; + mp->elt_size = elt_size; + mp->header_size = header_size; + mp->trailer_size = trailer_size; + mp->cache_size = cache_size; + mp->private_data_size = private_data_size; + + /* call the initializer */ + if (mp_init) + mp_init(mp, mp_init_arg); + + /* fill the headers and trailers, and add objects in ring */ + obj = (char *)mp + sizeof(struct rte_mempool) + private_data_size; + for (i = 0; i < n; i++) { + struct rte_mempool **mpp; + obj = (char *)obj + header_size; + + /* set mempool ptr in header */ + mpp = __mempool_from_obj(obj); + *mpp = mp; + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + __mempool_write_header_cookie(obj, 1); + __mempool_write_trailer_cookie(obj); +#endif + /* call the initializer */ + if (obj_init) + obj_init(mp, obj_init_arg, obj, i); + + /* enqueue in ring */ + rte_ring_sp_enqueue(mp->ring, obj); + obj = (char *)obj + elt_size + trailer_size; + } + + TAILQ_INSERT_TAIL(mempool_list, mp, next); + return mp; +} + +/* Return the number of entries in the mempool */ +unsigned +rte_mempool_count(const struct rte_mempool *mp) +{ + unsigned count; + + count = rte_ring_count(mp->ring); + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + { + unsigned lcore_id; + if (mp->cache_size == 0) + return count; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) + count += mp->local_cache[lcore_id].len; + } +#endif + + /* + * due to race condition (access to len is not locked), the + * total can be greater than size... 
so fix the result + */ + if (count > mp->size) + return mp->size; + return count; +} + +/* dump the cache status */ +static unsigned +rte_mempool_dump_cache(const struct rte_mempool *mp) +{ +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + unsigned lcore_id; + unsigned count = 0; + unsigned cache_count; + + printf(" cache infos:\n"); + printf(" cache_size=%"PRIu32"\n", mp->cache_size); + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + cache_count = mp->local_cache[lcore_id].len; + printf(" cache_count[%u]=%u\n", lcore_id, cache_count); + count += cache_count; + } + printf(" total_cache_count=%u\n", count); + return count; +#else + RTE_SET_USED(mp); + printf(" cache disabled\n"); + return 0; +#endif +} + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +/* check cookies before and after objects */ +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif +static void +mempool_audit_cookies(const struct rte_mempool *mp) +{ + unsigned i; + void *obj; + void * const *obj_table; + + obj = (char *)mp + sizeof(struct rte_mempool) + mp->private_data_size; + for (i = 0; i < mp->size; i++) { + obj = (char *)obj + mp->header_size; + obj_table = &obj; + __mempool_check_cookies(mp, obj_table, 1, 2); + obj = (char *)obj + mp->elt_size + mp->trailer_size; + } +} +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic error "-Wcast-qual" +#endif +#else +#define mempool_audit_cookies(mp) do {} while(0) +#endif + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 +/* check cookies before and after objects */ +static void +mempool_audit_cache(const struct rte_mempool *mp) +{ + /* check cache size consistency */ + unsigned lcore_id; + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + if (mp->local_cache[lcore_id].len > mp->cache_size) { + RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n", + lcore_id); + rte_panic("MEMPOOL: invalid cache len\n"); + } + } +} +#else +#define mempool_audit_cache(mp) do {} while(0) +#endif + + +/* check the consistency of mempool (size, cookies, ...) 
*/ +void +rte_mempool_audit(const struct rte_mempool *mp) +{ + mempool_audit_cache(mp); + mempool_audit_cookies(mp); +} + +/* dump the status of the mempool on the console */ +void +rte_mempool_dump(const struct rte_mempool *mp) +{ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + struct rte_mempool_debug_stats sum; + unsigned lcore_id; +#endif + unsigned common_count; + unsigned cache_count; + + printf("mempool <%s>@%p\n", mp->name, mp); + printf(" flags=%x\n", mp->flags); + printf(" ring=<%s>@%p\n", mp->ring->name, mp->ring); + printf(" size=%"PRIu32"\n", mp->size); + printf(" bulk_default=%"PRIu32"\n", mp->bulk_default); + printf(" header_size=%"PRIu32"\n", mp->header_size); + printf(" elt_size=%"PRIu32"\n", mp->elt_size); + printf(" trailer_size=%"PRIu32"\n", mp->trailer_size); + printf(" total_obj_size=%"PRIu32"\n", + mp->header_size + mp->elt_size + mp->trailer_size); + + cache_count = rte_mempool_dump_cache(mp); + common_count = rte_ring_count(mp->ring); + if ((cache_count + common_count) > mp->size) + common_count = mp->size - cache_count; + printf(" common_pool_count=%u\n", common_count); + + /* sum and dump statistics */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + memset(&sum, 0, sizeof(sum)); + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + sum.put_bulk += mp->stats[lcore_id].put_bulk; + sum.put_objs += mp->stats[lcore_id].put_objs; + sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk; + sum.get_success_objs += mp->stats[lcore_id].get_success_objs; + sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk; + sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs; + } + printf(" stats:\n"); + printf(" put_bulk=%"PRIu64"\n", sum.put_bulk); + printf(" put_objs=%"PRIu64"\n", sum.put_objs); + printf(" get_success_bulk=%"PRIu64"\n", sum.get_success_bulk); + printf(" get_success_objs=%"PRIu64"\n", sum.get_success_objs); + printf(" get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk); + printf(" get_fail_objs=%"PRIu64"\n", sum.get_fail_objs); +#else + printf(" no statistics available\n"); +#endif + + rte_mempool_audit(mp); +} + +/* dump the status of all mempools on the console */ +void +rte_mempool_list_dump(void) +{ + const struct rte_mempool *mp = NULL; + + TAILQ_FOREACH(mp, mempool_list, next) { + rte_mempool_dump(mp); + } +} + +/* search a mempool from its name */ +struct rte_mempool * +rte_mempool_lookup(const char *name) +{ + struct rte_mempool *mp = NULL; + + /* check that we have an initialised tail queue */ + if (mempool_list == NULL) + if ((mempool_list = RTE_TAILQ_RESERVE("RTE_MEMPOOL", \ + rte_mempool_list)) == NULL){ + rte_errno = E_RTE_NO_TAILQ; + return NULL; + } + + TAILQ_FOREACH(mp, mempool_list, next) { + if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0) + break; + } + if (mp == NULL) + rte_errno = ENOENT; + + return mp; +} diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h new file mode 100644 index 0000000000..cfc62f70bf --- /dev/null +++ b/lib/librte_mempool/rte_mempool.h @@ -0,0 +1,1087 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_MEMPOOL_H_ +#define _RTE_MEMPOOL_H_ + +/** + * @file + * RTE Mempool. + * + * A memory pool is an allocator of fixed-size object. It is + * identified by its name, and uses a ring to store free objects. It + * provides some other optional services, like a per-core object + * cache, and an alignment helper to ensure that objects are padded + * to spread them equally on all RAM channels, ranks, and so on. + * + * Objects owned by a mempool should never be added in another + * mempool. When an object is freed using rte_mempool_put() or + * equivalent, the object data is not modified; the user can save some + * meta-data in the object data and retrieve them when allocating a + * new object. + * + * Note: the mempool implementation is not preemptable. A lcore must + * not be interrupted by another task that uses the same mempool + * (because it uses a ring which is not preemptable). Also, mempool + * functions must not be used outside the DPDK environment: for + * example, in linuxapp environment, a thread that is not created by + * the EAL must not use mempools. This is due to the per-lcore cache + * that won't work as rte_lcore_id() will not return a correct value. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */ +#define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */ +#define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/ + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +/** + * A structure that stores the mempool statistics (per-lcore). + */ +struct rte_mempool_debug_stats { + uint64_t put_bulk; /**< Number of puts. */ + uint64_t put_objs; /**< Number of objects successfully put. */ + uint64_t get_success_bulk; /**< Successful allocation number. */ + uint64_t get_success_objs; /**< Objects successfully allocated. */ + uint64_t get_fail_bulk; /**< Failed allocation number. */ + uint64_t get_fail_objs; /**< Objects that failed to be allocated. */ +} __rte_cache_aligned; +#endif + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 +/** + * A structure that stores a per-core object cache. 
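+ *
+ * The cache is filled from and flushed to the ring in bursts of
+ * cache_size objects by the put/get helpers below, so that most
+ * allocations and frees stay lcore-local.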
+ */ +struct rte_mempool_cache { + unsigned len; /**< Cache len */ + void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE]; /**< Cache objects */ +} __rte_cache_aligned; +#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */ + +#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool. */ + +/** + * The RTE mempool structure. + */ +struct rte_mempool { + TAILQ_ENTRY(rte_mempool) next; /**< Next in list. */ + + char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */ + struct rte_ring *ring; /**< Ring to store objects. */ + phys_addr_t phys_addr; /**< Phys. addr. of mempool struct. */ + int flags; /**< Flags of the mempool. */ + uint32_t size; /**< Size of the mempool. */ + uint32_t bulk_default; /**< Default bulk count. */ + uint32_t cache_size; /**< Size of per-lcore local cache. */ + + uint32_t elt_size; /**< Size of an element. */ + uint32_t header_size; /**< Size of header (before elt). */ + uint32_t trailer_size; /**< Size of trailer (after elt). */ + + unsigned private_data_size; /**< Size of private data. */ + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + /** Per-lcore local cache. */ + struct rte_mempool_cache local_cache[RTE_MAX_LCORE]; +#endif + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + /** Per-lcore statistics. */ + struct rte_mempool_debug_stats stats[RTE_MAX_LCORE]; +#endif +} __rte_cache_aligned; + +#define MEMPOOL_F_NO_SPREAD 0x0001 /**< Do not spread in memory. */ +#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/ +#define MEMPOOL_F_SP_PUT 0x0004 /**< Default put is "single-producer".*/ +#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/ + +/** + * When debug is enabled, store some statistics. + * @param mp + * Pointer to the memory pool. + * @param name + * Name of the statistics field to increment in the memory pool. + * @param n + * Number to add to the object-oriented statistics. + */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +#define __MEMPOOL_STAT_ADD(mp, name, n) do { \ + unsigned __lcore_id = rte_lcore_id(); \ + mp->stats[__lcore_id].name##_objs += n; \ + mp->stats[__lcore_id].name##_bulk += 1; \ + } while(0) +#else +#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0) +#endif + +/** + * Get a pointer to a mempool pointer in the object header. + * @param obj + * Pointer to object. + * @return + * The pointer to the mempool from which the object was allocated. + */ +static inline struct rte_mempool **__mempool_from_obj(void *obj) +{ + struct rte_mempool **mpp; + unsigned off; + + off = sizeof(struct rte_mempool *); +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + off += sizeof(uint64_t); +#endif + mpp = (struct rte_mempool **)((char *)obj - off); + return mpp; +} + +/** + * Return a pointer to the mempool owning this object. + * + * @param obj + * An object that is owned by a pool. If this is not the case, + * the behavior is undefined. + * @return + * A pointer to the mempool structure. 
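+ *
+ * Illustrative check that an object really belongs to a given pool:
+ * @code
+ * if (rte_mempool_from_obj(obj) != mp)
+ *     rte_panic("object is owned by another mempool\n");
+ * @endcode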
+ */ +static inline const struct rte_mempool *rte_mempool_from_obj(void *obj) +{ + struct rte_mempool * const *mpp; + mpp = __mempool_from_obj(obj); + return *mpp; +} + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +/* get header cookie value */ +static inline uint64_t __mempool_read_header_cookie(const void *obj) +{ + return *(const uint64_t *)((const char *)obj - sizeof(uint64_t)); +} + +/* get trailer cookie value */ +static inline uint64_t __mempool_read_trailer_cookie(void *obj) +{ + struct rte_mempool **mpp = __mempool_from_obj(obj); + return *(uint64_t *)((char *)obj + (*mpp)->elt_size); +} + +/* write header cookie value */ +static inline void __mempool_write_header_cookie(void *obj, int free) +{ + uint64_t *cookie_p; + cookie_p = (uint64_t *)((char *)obj - sizeof(uint64_t)); + if (free == 0) + *cookie_p = RTE_MEMPOOL_HEADER_COOKIE1; + else + *cookie_p = RTE_MEMPOOL_HEADER_COOKIE2; + +} + +/* write trailer cookie value */ +static inline void __mempool_write_trailer_cookie(void *obj) +{ + uint64_t *cookie_p; + struct rte_mempool **mpp = __mempool_from_obj(obj); + cookie_p = (uint64_t *)((char *)obj + (*mpp)->elt_size); + *cookie_p = RTE_MEMPOOL_TRAILER_COOKIE; +} +#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ + +/** + * Check and update cookies or panic. + * + * @param mp + * Pointer to the memory pool. + * @param obj_table_const + * Pointer to a table of void * pointers (objects). + * @param n + * Index of object in object table. + * @param free + * - 0: object is supposed to be allocated, mark it as free + * - 1: object is supposed to be free, mark it as allocated + * - 2: just check that cookie is valid (free or allocated) + */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic ignored "-Wcast-qual" +#endif +static inline void __mempool_check_cookies(const struct rte_mempool *mp, + void * const *obj_table_const, + unsigned n, int free) +{ + uint64_t cookie; + void *tmp; + void *obj; + void **obj_table; + + /* Force to drop the "const" attribute. 
This is done only when + * DEBUG is enabled */ + tmp = (void *) obj_table_const; + obj_table = (void **) tmp; + + while (n--) { + obj = obj_table[n]; + + if (rte_mempool_from_obj(obj) != mp) + rte_panic("MEMPOOL: object is owned by another " + "mempool\n"); + + cookie = __mempool_read_header_cookie(obj); + + if (free == 0) { + if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) { + rte_log_set_history(0); + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%"PRIx64"\n", + obj, mp, cookie); + rte_panic("MEMPOOL: bad header cookie (put)\n"); + } + __mempool_write_header_cookie(obj, 1); + } + else if (free == 1) { + if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) { + rte_log_set_history(0); + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%"PRIx64"\n", + obj, mp, cookie); + rte_panic("MEMPOOL: bad header cookie (get)\n"); + } + __mempool_write_header_cookie(obj, 0); + } + else if (free == 2) { + if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 && + cookie != RTE_MEMPOOL_HEADER_COOKIE2) { + rte_log_set_history(0); + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%"PRIx64"\n", + obj, mp, cookie); + rte_panic("MEMPOOL: bad header cookie (audit)\n"); + } + } + cookie = __mempool_read_trailer_cookie(obj); + if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) { + rte_log_set_history(0); + RTE_LOG(CRIT, MEMPOOL, + "obj=%p, mempool=%p, cookie=%"PRIx64"\n", + obj, mp, cookie); + rte_panic("MEMPOOL: bad trailer cookie\n"); + } + } +} +#ifndef __INTEL_COMPILER +#pragma GCC diagnostic error "-Wcast-qual" +#endif +#else +#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0) +#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ + +/** + * An object constructor callback function for mempool. + * + * Arguments are the mempool, the opaque pointer given by the user in + * rte_mempool_create(), the pointer to the element and the index of + * the element in the pool. + */ +typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *, + void *, unsigned); + +/** + * A mempool constructor callback function. + * + * Arguments are the mempool and the opaque pointer given by the user in + * rte_mempool_create(). + */ +typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); + +/** + * Creates a new mempool named *name* in memory. + * + * This function uses ``memzone_reserve()`` to allocate memory. The + * pool contains n elements of elt_size. Its size is set to n. By + * default, bulk_default_count (the default number of elements to + * get/put in the pool) is set to 1. @see rte_mempool_set_bulk_count() + * to modify this valule. + * + * @param name + * The name of the mempool. + * @param n + * The number of elements in the mempool. The optimum size (in terms of + * memory usage) for a mempool is when n is a power of two minus one: + * n = (2^q - 1). + * @param elt_size + * The size of each element. + * @param cache_size + * If cache_size is non-zero, the rte_mempool library will try to + * limit the accesses to the common lockless pool, by maintaining a + * per-lcore object cache. This argument must be lower or equal to + * CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose + * cache_size to have "n modulo cache_size == 0": if this is + * not the case, some elements will always stay in the pool and will + * never be used. The access to the per-lcore table is of course + * faster than the multi-producer/consumer pool. The cache can be + * disabled if the cache_size argument is set to 0; it can be useful to + * avoid loosing objects in cache. 
Note that even if not used, the + * memory space for cache is always reserved in a mempool structure, + * except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0. + * @param private_data_size + * The size of the private data appended after the mempool + * structure. This is useful for storing some private data after the + * mempool structure, as is done for rte_mbuf_pool for example. + * @param mp_init + * A function pointer that is called for initialization of the pool, + * before object initialization. The user can initialize the private + * data in this function if needed. This parameter can be NULL if + * not needed. + * @param mp_init_arg + * An opaque pointer to data that can be used in the mempool + * constructor function. + * @param obj_init + * A function pointer that is called for each object at + * initialization of the pool. The user can set some meta data in + * objects if needed. This parameter can be NULL if not needed. + * The obj_init() function takes the mempool pointer, the init_arg, + * the object pointer and the object number as parameters. + * @param obj_init_arg + * An opaque pointer to data that can be used as an argument for + * each call to the object constructor function. + * @param socket_id + * The *socket_id* argument is the socket identifier in the case of + * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA + * constraint for the reserved zone. + * @param flags + * The *flags* arguments is an OR of following flags: + * - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread + * between channels in RAM: the pool allocator will add padding + * between objects depending on the hardware configuration. See + * Memory alignment constraints for details. If this flag is set, + * the allocator will just align them to a cache line. + * - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are + * cache-aligned. This flag removes this constraint, and no + * padding will be present between objects. This flag implies + * MEMPOOL_F_NO_SPREAD. + * - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior + * when using rte_mempool_put() or rte_mempool_put_bulk() is + * "single-producer". Otherwise, it is "multi-producers". + * - MEMPOOL_F_SC_GET: If this flag is set, the default behavior + * when using rte_mempool_get() or rte_mempool_get_bulk() is + * "single-consumer". Otherwise, it is "multi-consumers". + * @return + * The pointer to the new allocated mempool, on success. NULL on error + * with rte_errno set appropriately. Possible rte_errno values include: + * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure + * - E_RTE_SECONDARY - function was called from a secondary process instance + * - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list + * - EINVAL - cache size provided is too large + * - ENOSPC - the maximum number of memzones has already been allocated + * - EEXIST - a memzone with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create memzone + */ +struct rte_mempool * +rte_mempool_create(const char *name, unsigned n, unsigned elt_size, + unsigned cache_size, unsigned private_data_size, + rte_mempool_ctor_t *mp_init, void *mp_init_arg, + rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + int socket_id, unsigned flags); + +/** + * Set the default bulk count for put/get. + * + * The *count* parameter is the default number of bulk elements to + * get/put when using ``rte_mempool_*_{en,de}queue_bulk()``. 
It must + * be greater than 0 and less than half of the mempool size. + * + * @param mp + * A pointer to the mempool structure. + * @param count + * A new water mark value. + * @return + * - 0: Success; default_bulk_count changed. + * - -EINVAL: Invalid count value. + */ +static inline int +rte_mempool_set_bulk_count(struct rte_mempool *mp, unsigned count) +{ + if (unlikely(count == 0 || count >= mp->size)) + return -EINVAL; + + mp->bulk_default = count; + return 0; +} + +/** + * Get the default bulk count for put/get. + * + * @param mp + * A pointer to the mempool structure. + * @return + * The default bulk count for enqueue/dequeue. + */ +static inline unsigned +rte_mempool_get_bulk_count(struct rte_mempool *mp) +{ + return mp->bulk_default; +} + +/** + * Dump the status of the mempool to the console. + * + * @param mp + * A pointer to the mempool structure. + */ +void rte_mempool_dump(const struct rte_mempool *mp); + +/** + * @internal Put several objects back in the mempool; used internally. + * @param mp + * A pointer to the mempool structure. + * @param obj_table + * A pointer to a table of void * pointers (objects). + * @param n + * The number of objects to store back in the mempool, must be strictly + * positive. + * @param is_mp + * Mono-producer (0) or multi-producers (1). + */ +static inline void +__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, + unsigned n, int is_mp) +{ +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + struct rte_mempool_cache *cache; + uint32_t cache_len; + void **cache_objs; + unsigned lcore_id = rte_lcore_id(); + uint32_t cache_size = mp->cache_size; + uint32_t cache_add_count; +#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */ + + /* increment stat now, adding in mempool always success */ + __MEMPOOL_STAT_ADD(mp, put, n); + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + /* cache is not enabled or single producer */ + if (unlikely(cache_size == 0 || is_mp == 0)) + goto ring_enqueue; + + cache = &mp->local_cache[lcore_id]; + cache_len = cache->len; + cache_objs = cache->objs; + + /* cache is full and we add many objects: enqueue in ring */ + if (unlikely(cache_len == cache_size && n >= cache_size)) + goto ring_enqueue; + + /* + * cache is full and we add few objects: enqueue the content + * of the cache in ring + */ + if (unlikely(cache_len == cache_size)) { +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + if (rte_ring_mp_enqueue_bulk(mp->ring, cache->objs, + cache_size) < 0) + rte_panic("cannot put objects in mempool\n"); +#else + rte_ring_mp_enqueue_bulk(mp->ring, cache->objs, + cache_size); +#endif + cache_len = 0; + } + + /* determine how many objects we can add in cache */ + if (likely(n <= cache_size - cache_len)) + cache_add_count = n; + else + cache_add_count = cache_size - cache_len; + + /* add in cache while there is enough room */ + while (cache_add_count > 0) { + cache_objs[cache_len] = *obj_table; + obj_table++; + cache_len++; + n--; + cache_add_count--; + } + + cache->len = cache_len; + + /* no more object to add, return */ + if (likely(n == 0)) + return; + + ring_enqueue: +#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */ + + /* push remaining objects in ring */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + if (is_mp) { + if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0) + rte_panic("cannot put objects in mempool\n"); + } + else { + if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0) + rte_panic("cannot put objects in mempool\n"); + } +#else + if (is_mp) + rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n); + else + rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n); 
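+ /* the non-debug build ignores the return value: the ring was sized
+  * for the whole mempool at creation time, so objects coming back
+  * from the pool always fit */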
+#endif +} + + +/** + * Put several objects back in the mempool (multi-producers safe). + * + * @param mp + * A pointer to the mempool structure. + * @param obj_table + * A pointer to a table of void * pointers (objects). + * @param n + * The number of objects to add in the mempool from the obj_table. + */ +static inline void +rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table, + unsigned n) +{ + __mempool_check_cookies(mp, obj_table, n, 0); + __mempool_put_bulk(mp, obj_table, n, 1); +} + +/** + * Put several objects back in the mempool (NOT multi-producers safe). + * + * @param mp + * A pointer to the mempool structure. + * @param obj_table + * A pointer to a table of void * pointers (objects). + * @param n + * The number of objects to add in the mempool from obj_table. + */ +static inline void +rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table, + unsigned n) +{ + __mempool_check_cookies(mp, obj_table, n, 0); + __mempool_put_bulk(mp, obj_table, n, 0); +} + +/** + * Put several objects back in the mempool. + * + * This function calls the multi-producer or the single-producer + * version depending on the default behavior that was specified at + * mempool creation time (see flags). + * + * @param mp + * A pointer to the mempool structure. + * @param obj_table + * A pointer to a table of void * pointers (objects). + * @param n + * The number of objects to add in the mempool from obj_table. + */ +static inline void +rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, + unsigned n) +{ + __mempool_check_cookies(mp, obj_table, n, 0); + __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT)); +} + +/** + * Put one object in the mempool (multi-producers safe). + * + * @param mp + * A pointer to the mempool structure. + * @param obj + * A pointer to the object to be added. + */ +static inline void +rte_mempool_mp_put(struct rte_mempool *mp, void *obj) +{ + rte_mempool_mp_put_bulk(mp, &obj, 1); +} + +/** + * Put one object back in the mempool (NOT multi-producers safe). + * + * @param mp + * A pointer to the mempool structure. + * @param obj + * A pointer to the object to be added. + */ +static inline void +rte_mempool_sp_put(struct rte_mempool *mp, void *obj) +{ + rte_mempool_sp_put_bulk(mp, &obj, 1); +} + +/** + * Put one object back in the mempool. + * + * This function calls the multi-producer or the single-producer + * version depending on the default behavior that was specified at + * mempool creation time (see flags). + * + * @param mp + * A pointer to the mempool structure. + * @param obj + * A pointer to the object to be added. + */ +static inline void +rte_mempool_put(struct rte_mempool *mp, void *obj) +{ + rte_mempool_put_bulk(mp, &obj, 1); +} + +/** + * @internal Get several objects from the mempool; used internally. + * @param mp + * A pointer to the mempool structure. + * @param obj_table + * A pointer to a table of void * pointers (objects). + * @param n + * The number of objects to get, must be strictly positive. + * @param is_mc + * Mono-consumer (0) or multi-consumers (1). + * @return + * - >=0: Success; number of objects supplied. + * - <0: Error; code of ring dequeue function. 
+ */ +static inline int +__mempool_get_bulk(struct rte_mempool *mp, void **obj_table, + unsigned n, int is_mc) +{ + int ret; +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + unsigned n_orig = n; +#endif +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + struct rte_mempool_cache *cache; + uint32_t cache_len, cache_len_save = 0; + void **cache_objs; + unsigned lcore_id = rte_lcore_id(); + uint32_t cache_size = mp->cache_size; + uint32_t cache_del_count; + + cache = &mp->local_cache[lcore_id]; + + /* cache is not enabled or single consumer */ + if (unlikely(cache_size == 0 || is_mc == 0)) + goto ring_dequeue; + + cache_len = cache->len; + cache_objs = cache->objs; + + /* cache is empty and we need many objects: dequeue from ring */ + if (unlikely(cache_len == 0 && n >= cache_size)) + goto ring_dequeue; + + /* cache is empty and we dequeue few objects: fill the cache first */ + if (unlikely(cache_len == 0 && n < cache_size)) { + ret = rte_ring_mc_dequeue_bulk(mp->ring, cache_objs, + cache_size); + if (unlikely(ret < 0)) { + __MEMPOOL_STAT_ADD(mp, get_fail, n_orig); + return ret; + } + + cache_len = cache_size; + } + + if (likely(n <= cache_len)) + cache_del_count = n; + else + cache_del_count = cache_len; + + cache_len_save = cache_len; + + /* add in cache only while there is enough room */ + while (cache_del_count > 0) { + cache_len--; + *obj_table = cache_objs[cache_len]; + obj_table++; + n--; + cache_del_count--; + } + + cache->len = cache_len; + + /* no more object to get, return */ + if (likely(n == 0)) { + __MEMPOOL_STAT_ADD(mp, get_success, n_orig); + return 0; + } + + ring_dequeue: +#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */ + + /* get remaining objects from ring */ + if (is_mc) + ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n); + else + ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n); + +#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 + /* + * bad luck, the ring is empty but we already dequeued some + * entries from cache, we have to restore them + */ + if (unlikely(ret < 0 && cache_len_save != 0)) + cache->len = cache_len_save; +#endif + + if (ret < 0) + __MEMPOOL_STAT_ADD(mp, get_fail, n_orig); + else + __MEMPOOL_STAT_ADD(mp, get_success, n_orig); + + return ret; +} + +/** + * Get several objects from the mempool (multi-consumers safe). + * + * If cache is enabled, objects will be retrieved first from cache, + * subsequently from the common pool. Note that it can return -ENOENT when + * the local cache and common pool are empty, even if cache from other + * lcores are full. + * + * @param mp + * A pointer to the mempool structure. + * @param obj_table + * A pointer to a table of void * pointers (objects) that will be filled. + * @param n + * The number of objects to get from mempool to obj_table. + * @return + * - 0: Success; objects taken. + * - -ENOENT: Not enough entries in the mempool; no object is retrieved. + */ +static inline int +rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n) +{ + int ret; + ret = __mempool_get_bulk(mp, obj_table, n, 1); + if (ret == 0) + __mempool_check_cookies(mp, obj_table, n, 1); + return ret; +} + +/** + * Get several objects from the mempool (NOT multi-consumers safe). + * + * If cache is enabled, objects will be retrieved first from cache, + * subsequently from the common pool. Note that it can return -ENOENT when + * the local cache and common pool are empty, even if cache from other + * lcores are full. + * + * @param mp + * A pointer to the mempool structure. 
+ * @param obj_table + * A pointer to a table of void * pointers (objects) that will be filled. + * @param n + * The number of objects to get from the mempool to obj_table. + * @return + * - 0: Success; objects taken. + * - -ENOENT: Not enough entries in the mempool; no object is + * retrieved. + */ +static inline int +rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n) +{ + int ret; + ret = __mempool_get_bulk(mp, obj_table, n, 0); + if (ret == 0) + __mempool_check_cookies(mp, obj_table, n, 1); + return ret; +} + +/** + * Get several objects from the mempool. + * + * This function calls the multi-consumers or the single-consumer + * version, depending on the default behaviour that was specified at + * mempool creation time (see flags). + * + * If cache is enabled, objects will be retrieved first from cache, + * subsequently from the common pool. Note that it can return -ENOENT when + * the local cache and common pool are empty, even if cache from other + * lcores are full. + * + * @param mp + * A pointer to the mempool structure. + * @param obj_table + * A pointer to a table of void * pointers (objects) that will be filled. + * @param n + * The number of objects to get from the mempool to obj_table. + * @return + * - 0: Success; objects taken + * - -ENOENT: Not enough entries in the mempool; no object is retrieved. + */ +static inline int +rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n) +{ + int ret; + ret = __mempool_get_bulk(mp, obj_table, n, + !(mp->flags & MEMPOOL_F_SC_GET)); + if (ret == 0) + __mempool_check_cookies(mp, obj_table, n, 1); + return ret; +} + +/** + * Get one object from the mempool (multi-consumers safe). + * + * If cache is enabled, objects will be retrieved first from cache, + * subsequently from the common pool. Note that it can return -ENOENT when + * the local cache and common pool are empty, even if cache from other + * lcores are full. + * + * @param mp + * A pointer to the mempool structure. + * @param obj_p + * A pointer to a void * pointer (object) that will be filled. + * @return + * - 0: Success; objects taken. + * - -ENOENT: Not enough entries in the mempool; no object is retrieved. + */ +static inline int +rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p) +{ + return rte_mempool_mc_get_bulk(mp, obj_p, 1); +} + +/** + * Get one object from the mempool (NOT multi-consumers safe). + * + * If cache is enabled, objects will be retrieved first from cache, + * subsequently from the common pool. Note that it can return -ENOENT when + * the local cache and common pool are empty, even if cache from other + * lcores are full. + * + * @param mp + * A pointer to the mempool structure. + * @param obj_p + * A pointer to a void * pointer (object) that will be filled. + * @return + * - 0: Success; objects taken. + * - -ENOENT: Not enough entries in the mempool; no object is retrieved. + */ +static inline int +rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p) +{ + return rte_mempool_sc_get_bulk(mp, obj_p, 1); +} + +/** + * Get one object from the mempool. + * + * This function calls the multi-consumers or the single-consumer + * version, depending on the default behavior that was specified at + * mempool creation (see flags). + * + * If cache is enabled, objects will be retrieved first from cache, + * subsequently from the common pool. Note that it can return -ENOENT when + * the local cache and common pool are empty, even if cache from other + * lcores are full. 
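Because a bulk get is all-or-nothing, callers usually retry with a smaller request when -ENOENT comes back. A minimal sketch of that pattern with the generic getter (editorial, not part of this patch; pool_grab_burst is a hypothetical helper):

#include <rte_mempool.h>

/* Hypothetical helper: try to take "want" objects, shrinking the request
 * while the pool (local cache plus ring) cannot satisfy it in full. */
static unsigned
pool_grab_burst(struct rte_mempool *mp, void **objs, unsigned want)
{
        while (want > 0 && rte_mempool_get_bulk(mp, objs, want) < 0)
                want /= 2;      /* -ENOENT: nothing was retrieved, ask for less */

        return want;            /* number of objects now owned by the caller */
}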
+ * + * @param mp + * A pointer to the mempool structure. + * @param obj_p + * A pointer to a void * pointer (object) that will be filled. + * @return + * - 0: Success; objects taken. + * - -ENOENT: Not enough entries in the mempool; no object is retrieved. + */ +static inline int +rte_mempool_get(struct rte_mempool *mp, void **obj_p) +{ + return rte_mempool_get_bulk(mp, obj_p, 1); +} + +/** + * Return the number of entries in the mempool. + * + * When cache is enabled, this function has to browse the length of + * all lcores, so it should not be used in a data path, but only for + * debug purposes. + * + * @param mp + * A pointer to the mempool structure. + * @return + * The number of entries in the mempool. + */ +unsigned rte_mempool_count(const struct rte_mempool *mp); + +/** + * Return the number of free entries in the mempool. + * + * When cache is enabled, this function has to browse the length of + * all lcores, so it should not be used in a data path, but only for + * debug purposes. + * + * @param mp + * A pointer to the mempool structure. + * @return + * The number of free entries in the mempool. + */ +static inline unsigned +rte_mempool_free_count(const struct rte_mempool *mp) +{ + return mp->size - rte_mempool_count(mp); +} + +/** + * Test if the mempool is full. + * + * When cache is enabled, this function has to browse the length of all + * lcores, so it should not be used in a data path, but only for debug + * purposes. + * + * @param mp + * A pointer to the mempool structure. + * @return + * - 1: The mempool is full. + * - 0: The mempool is not full. + */ +static inline int +rte_mempool_full(const struct rte_mempool *mp) +{ + return !!(rte_mempool_count(mp) == mp->size); +} + +/** + * Test if the mempool is empty. + * + * When cache is enabled, this function has to browse the length of all + * lcores, so it should not be used in a data path, but only for debug + * purposes. + * + * @param mp + * A pointer to the mempool structure. + * @return + * - 1: The mempool is empty. + * - 0: The mempool is not empty. + */ +static inline int +rte_mempool_empty(const struct rte_mempool *mp) +{ + return !!(rte_mempool_count(mp) == 0); +} + +/** + * Return the physical address of elt, which is an element of the pool mp. + * + * @param mp + * A pointer to the mempool structure. + * @param elt + * A pointer (virtual address) to the element of the pool. + * @return + * The physical address of the elt element. + */ +static inline phys_addr_t rte_mempool_virt2phy(const struct rte_mempool *mp, + const void *elt) +{ + uintptr_t off; + + off = (const char *)elt - (const char *)mp; + return mp->phys_addr + off; +} + + +/** + * Check the consistency of mempool objects. + * + * Verify the coherency of fields in the mempool structure. Also check + * that the cookies of mempool objects (even the ones that are not + * present in pool) have a correct value. If not, a panic will occur. + * + * @param mp + * A pointer to the mempool structure. + */ +void rte_mempool_audit(const struct rte_mempool *mp); + +/** + * Return a pointer to the private data in an mempool structure. + * + * @param mp + * A pointer to the mempool structure. + * @return + * A pointer to the private data. + */ +static inline void *rte_mempool_get_priv(struct rte_mempool *mp) +{ + return (char *)mp + sizeof(struct rte_mempool); +} + +/** + * Dump the status of all mempools on the console + */ +void rte_mempool_list_dump(void); + +/** + * Search a mempool from its name + * + * @param name + * The name of the mempool. 
+ * @return
+ * The pointer to the mempool matching the name, or NULL if not found,
+ * in which case rte_errno is set appropriately. Possible rte_errno values include:
+ * - ENOENT - required entry not available to return.
+ *
+ */
+struct rte_mempool *rte_mempool_lookup(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_H_ */
diff --git a/lib/librte_net/Makefile b/lib/librte_net/Makefile
new file mode 100644
index 0000000000..230af6cc17
--- /dev/null
+++ b/lib/librte_net/Makefile
@@ -0,0 +1,42 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include := rte_ip.h rte_tcp.h rte_udp.h rte_sctp.h
+
+
+include $(RTE_SDK)/mk/rte.install.mk
diff --git a/lib/librte_net/rte_ip.h b/lib/librte_net/rte_ip.h
new file mode 100644
index 0000000000..2689397cf5
--- /dev/null
+++ b/lib/librte_net/rte_ip.h
@@ -0,0 +1,255 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)in.h 8.3 (Berkeley) 1/3/94 + * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $ + */ + +#ifndef _RTE_IP_H_ +#define _RTE_IP_H_ + +/** + * @file + * + * IP-related defines + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * IPv4 Header + */ +struct ipv4_hdr { + uint8_t version_ihl; /**< version and header length */ + uint8_t type_of_service; /**< type of service */ + uint16_t total_length; /**< length of packet */ + uint16_t packet_id; /**< packet ID */ + uint16_t fragment_offset; /**< fragmentation offset */ + uint8_t time_to_live; /**< time to live */ + uint8_t next_proto_id; /**< protocol ID */ + uint16_t hdr_checksum; /**< header checksum */ + uint32_t src_addr; /**< source address */ + uint32_t dst_addr; /**< destination address */ +} __attribute__((__packed__)); + +/** Create IPv4 address */ +#define IPv4(a,b,c,d) ((uint32_t)(((a) & 0xff) << 24) | \ + (((b) & 0xff) << 16) | \ + (((c) & 0xff) << 8) | \ + ((d) & 0xff)) + +/* IPv4 protocols */ +#define IPPROTO_IP 0 /**< dummy for IP */ +#define IPPROTO_HOPOPTS 0 /**< IP6 hop-by-hop options */ +#define IPPROTO_ICMP 1 /**< control message protocol */ +#define IPPROTO_IGMP 2 /**< group mgmt protocol */ +#define IPPROTO_GGP 3 /**< gateway^2 (deprecated) */ +#define IPPROTO_IPV4 4 /**< IPv4 encapsulation */ +#define IPPROTO_TCP 6 /**< tcp */ +#define IPPROTO_ST 7 /**< Stream protocol II */ +#define IPPROTO_EGP 8 /**< exterior gateway protocol */ +#define IPPROTO_PIGP 9 /**< private interior gateway */ +#define IPPROTO_RCCMON 10 /**< BBN RCC Monitoring */ +#define IPPROTO_NVPII 11 /**< network voice protocol*/ +#define IPPROTO_PUP 12 /**< pup */ +#define IPPROTO_ARGUS 13 /**< Argus */ +#define IPPROTO_EMCON 14 /**< EMCON */ +#define IPPROTO_XNET 15 /**< Cross Net Debugger */ +#define IPPROTO_CHAOS 16 /**< Chaos*/ +#define IPPROTO_UDP 17 /**< user datagram protocol */ +#define IPPROTO_MUX 18 /**< Multiplexing */ +#define IPPROTO_MEAS 19 /**< DCN Measurement Subsystems */ +#define IPPROTO_HMP 20 /**< Host Monitoring */ +#define IPPROTO_PRM 21 /**< Packet Radio Measurement */ +#define IPPROTO_IDP 22 /**< xns idp */ +#define IPPROTO_TRUNK1 23 /**< Trunk-1 */ +#define IPPROTO_TRUNK2 24 /**< Trunk-2 */ +#define IPPROTO_LEAF1 25 /**< Leaf-1 */ +#define IPPROTO_LEAF2 26 /**< Leaf-2 */ +#define IPPROTO_RDP 27 /**< Reliable Data */ +#define IPPROTO_IRTP 28 /**< Reliable Transaction */ +#define IPPROTO_TP 29 /**< tp-4 w/ class negotiation */ +#define IPPROTO_BLT 30 /**< Bulk Data Transfer */ +#define IPPROTO_NSP 31 /**< Network Services */ +#define IPPROTO_INP 32 /**< Merit Internodal */ +#define IPPROTO_SEP 33 /**< Sequential Exchange */ +#define IPPROTO_3PC 34 /**< Third Party Connect */ +#define IPPROTO_IDPR 35 /**< InterDomain Policy Routing */ +#define IPPROTO_XTP 36 /**< XTP */ +#define IPPROTO_DDP 37 /**< Datagram Delivery */ +#define IPPROTO_CMTP 38 /**< Control Message Transport */ +#define IPPROTO_TPXX 39 /**< TP++ Transport */ +#define IPPROTO_IL 40 /**< IL transport protocol */ +#define IPPROTO_IPV6 41 /**< IP6 header */ +#define IPPROTO_SDRP 42 /**< Source Demand Routing */ +#define IPPROTO_ROUTING 43 /**< IP6 routing header */ +#define IPPROTO_FRAGMENT 44 /**< IP6 fragmentation header */ +#define IPPROTO_IDRP 45 /**< InterDomain Routing*/ +#define IPPROTO_RSVP 46 /**< resource reservation */ +#define IPPROTO_GRE 47 /**< General Routing Encap. */ +#define IPPROTO_MHRP 48 /**< Mobile Host Routing */ +#define IPPROTO_BHA 49 /**< BHA */ +#define IPPROTO_ESP 50 /**< IP6 Encap Sec. 
Payload */ +#define IPPROTO_AH 51 /**< IP6 Auth Header */ +#define IPPROTO_INLSP 52 /**< Integ. Net Layer Security */ +#define IPPROTO_SWIPE 53 /**< IP with encryption */ +#define IPPROTO_NHRP 54 /**< Next Hop Resolution */ +/* 55-57: Unassigned */ +#define IPPROTO_ICMPV6 58 /**< ICMP6 */ +#define IPPROTO_NONE 59 /**< IP6 no next header */ +#define IPPROTO_DSTOPTS 60 /**< IP6 destination option */ +#define IPPROTO_AHIP 61 /**< any host internal protocol */ +#define IPPROTO_CFTP 62 /**< CFTP */ +#define IPPROTO_HELLO 63 /**< "hello" routing protocol */ +#define IPPROTO_SATEXPAK 64 /**< SATNET/Backroom EXPAK */ +#define IPPROTO_KRYPTOLAN 65 /**< Kryptolan */ +#define IPPROTO_RVD 66 /**< Remote Virtual Disk */ +#define IPPROTO_IPPC 67 /**< Pluribus Packet Core */ +#define IPPROTO_ADFS 68 /**< Any distributed FS */ +#define IPPROTO_SATMON 69 /**< Satnet Monitoring */ +#define IPPROTO_VISA 70 /**< VISA Protocol */ +#define IPPROTO_IPCV 71 /**< Packet Core Utility */ +#define IPPROTO_CPNX 72 /**< Comp. Prot. Net. Executive */ +#define IPPROTO_CPHB 73 /**< Comp. Prot. HeartBeat */ +#define IPPROTO_WSN 74 /**< Wang Span Network */ +#define IPPROTO_PVP 75 /**< Packet Video Protocol */ +#define IPPROTO_BRSATMON 76 /**< BackRoom SATNET Monitoring */ +#define IPPROTO_ND 77 /**< Sun net disk proto (temp.) */ +#define IPPROTO_WBMON 78 /**< WIDEBAND Monitoring */ +#define IPPROTO_WBEXPAK 79 /**< WIDEBAND EXPAK */ +#define IPPROTO_EON 80 /**< ISO cnlp */ +#define IPPROTO_VMTP 81 /**< VMTP */ +#define IPPROTO_SVMTP 82 /**< Secure VMTP */ +#define IPPROTO_VINES 83 /**< Banyon VINES */ +#define IPPROTO_TTP 84 /**< TTP */ +#define IPPROTO_IGP 85 /**< NSFNET-IGP */ +#define IPPROTO_DGP 86 /**< dissimilar gateway prot. */ +#define IPPROTO_TCF 87 /**< TCF */ +#define IPPROTO_IGRP 88 /**< Cisco/GXS IGRP */ +#define IPPROTO_OSPFIGP 89 /**< OSPFIGP */ +#define IPPROTO_SRPC 90 /**< Strite RPC protocol */ +#define IPPROTO_LARP 91 /**< Locus Address Resoloution */ +#define IPPROTO_MTP 92 /**< Multicast Transport */ +#define IPPROTO_AX25 93 /**< AX.25 Frames */ +#define IPPROTO_IPEIP 94 /**< IP encapsulated in IP */ +#define IPPROTO_MICP 95 /**< Mobile Int.ing control */ +#define IPPROTO_SCCSP 96 /**< Semaphore Comm. security */ +#define IPPROTO_ETHERIP 97 /**< Ethernet IP encapsulation */ +#define IPPROTO_ENCAP 98 /**< encapsulation header */ +#define IPPROTO_APES 99 /**< any private encr. 
scheme */ +#define IPPROTO_GMTP 100 /**< GMTP */ +#define IPPROTO_IPCOMP 108 /**< payload compression (IPComp) */ +/* 101-254: Partly Unassigned */ +#define IPPROTO_PIM 103 /**< Protocol Independent Mcast */ +#define IPPROTO_PGM 113 /**< PGM */ +#define IPPROTO_SCTP 132 /**< Stream Control Transport Protocol */ +/* 255: Reserved */ +/* BSD Private, local use, namespace incursion */ +#define IPPROTO_DIVERT 254 /**< divert pseudo-protocol */ +#define IPPROTO_RAW 255 /**< raw IP packet */ +#define IPPROTO_MAX 256 /**< maximum protocol number */ + +/* + * IPv4 address types + */ +#define IPV4_ANY ((uint32_t)0x00000000) /**< 0.0.0.0 */ +#define IPV4_LOOPBACK ((uint32_t)0x7f000001) /**< 127.0.0.1 */ +#define IPV4_BROADCAST ((uint32_t)0xe0000000) /**< 224.0.0.0 */ +#define IPV4_ALLHOSTS_GROUP ((uint32_t)0xe0000001) /**< 224.0.0.1 */ +#define IPV4_ALLRTRS_GROUP ((uint32_t)0xe0000002) /**< 224.0.0.2 */ +#define IPV4_MAX_LOCAL_GROUP ((uint32_t)0xe00000ff) /**< 224.0.0.255 */ + +/* + * IPv4 Multicast-related macros + */ +#define IPV4_MIN_MCAST IPv4(224, 0, 0, 0) /**< Minimal IPv4-multicast address */ +#define IPV4_MAX_MCAST IPv4(239, 255, 255, 255) /**< Maximum IPv4 multicast address */ + +#define IS_IPV4_MCAST(x) \ + ((x) >= IPV4_MIN_MCAST && (x) <= IPV4_MAX_MCAST) /**< check if IPv4 address is multicast */ + +/** + * IPv6 Header + */ +struct ipv6_hdr { + uint32_t vtc_flow; /**< IP version, traffic class & flow label. */ + uint16_t payload_len; /**< IP packet length - includes sizeof(ip_header). */ + uint8_t proto; /**< Protocol, next header. */ + uint8_t hop_limits; /**< Hop limits. */ + uint8_t src_addr[16]; /**< IP address of source host. */ + uint8_t dst_addr[16]; /**< IP address of destination host(s). */ +} __attribute__((__packed__)); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_IP_H_ */ diff --git a/lib/librte_net/rte_sctp.h b/lib/librte_net/rte_sctp.h new file mode 100644 index 0000000000..da7b562099 --- /dev/null +++ b/lib/librte_net/rte_sctp.h @@ -0,0 +1,101 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
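rte_ip.h is header-only: applications overlay the packed structures on packet data and use the macros directly. One subtlety worth showing (an editorial sketch, not part of this patch; is_mcast_dst is a hypothetical helper) is that IPv4() and the IPV4_*_MCAST bounds are host-order values, while ipv4_hdr fields arrive in network byte order, so a conversion is needed before IS_IPV4_MCAST() applies:

#include <rte_ip.h>
#include <rte_byteorder.h>

/* Hypothetical helper: does this IPv4 header target a multicast group? */
static int
is_mcast_dst(const struct ipv4_hdr *ip)
{
        uint32_t dst = rte_be_to_cpu_32(ip->dst_addr);  /* to host order */

        return IS_IPV4_MCAST(dst);      /* 224.0.0.0 .. 239.255.255.255 */
}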
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in.h 8.3 (Berkeley) 1/3/94 + * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $ + */ + +/** + * @file + * + * SCTP-related defines + */ + +#ifndef _RTE_SCTP_H_ +#define _RTE_SCTP_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +/** + * SCTP Header + */ +struct sctp_hdr { + uint16_t src_port; /**< Source port. */ + uint16_t dst_port; /**< Destin port. */ + uint32_t tag; /**< Validation tag. */ + uint32_t cksum; /**< Checksum. */ +} __attribute__((__packed__)); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_SCTP_H_ */ diff --git a/lib/librte_net/rte_tcp.h b/lib/librte_net/rte_tcp.h new file mode 100644 index 0000000000..25bd105ea7 --- /dev/null +++ b/lib/librte_net/rte_tcp.h @@ -0,0 +1,106 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)in.h 8.3 (Berkeley) 1/3/94 + * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $ + */ + +#ifndef _RTE_TCP_H_ +#define _RTE_TCP_H_ + +/** + * @file + * + * TCP-related defines + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * TCP Header + */ +struct tcp_hdr { + uint16_t src_port; /**< TCP source port. */ + uint16_t dst_port; /**< TCP destination port. */ + uint32_t sent_seq; /**< TX data sequence number. */ + uint32_t recv_ack; /**< RX data acknowledgement sequence number. */ + uint8_t data_off; /**< Data offset. */ + uint8_t tcp_flags; /**< TCP flags */ + uint16_t rx_win; /**< RX flow control window. */ + uint16_t cksum; /**< TCP checksum. */ + uint16_t tcp_urp; /**< TCP urgent pointer, if any. */ +} __attribute__((__packed__)); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_TCP_H_ */ diff --git a/lib/librte_net/rte_udp.h b/lib/librte_net/rte_udp.h new file mode 100644 index 0000000000..1da163f603 --- /dev/null +++ b/lib/librte_net/rte_udp.h @@ -0,0 +1,101 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Copyright (c) 1982, 1986, 1990, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in.h 8.3 (Berkeley) 1/3/94 + * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $ + */ + +#ifndef _RTE_UDP_H_ +#define _RTE_UDP_H_ + +/** + * @file + * + * UDP-related defines + */ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * UDP Header + */ +struct udp_hdr { + uint16_t src_port; /**< UDP source port. */ + uint16_t dst_port; /**< UDP destination port. */ + uint16_t dgram_len; /**< UDP datagram length */ + uint16_t dgram_cksum; /**< UDP datagram checksum */ +} __attribute__((__packed__)); + +#ifdef __cplusplus +} +#endif + +#endif /* RTE_UDP_H_ */ diff --git a/lib/librte_pmd_igb/Makefile b/lib/librte_pmd_igb/Makefile new file mode 100644 index 0000000000..127f466646 --- /dev/null +++ b/lib/librte_pmd_igb/Makefile @@ -0,0 +1,64 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
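Like rte_ip.h, the rte_tcp.h/rte_udp.h/rte_sctp.h structures are plain packed overlays with no helper functions. A small sketch (editorial, not part of this patch; udp_ports is a hypothetical helper) of walking from an IPv4 header to the UDP ports, using the IHL nibble of version_ihl to find the L4 offset:

#include <stdint.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_byteorder.h>

/* Hypothetical helper: extract UDP ports from a raw IPv4 frame payload.
 * The IHL field counts 32-bit words, so the UDP header starts at ihl * 4. */
static int
udp_ports(const uint8_t *l3, uint16_t *src, uint16_t *dst)
{
        const struct ipv4_hdr *ip = (const struct ipv4_hdr *)l3;
        const struct udp_hdr *udp;

        if (ip->next_proto_id != IPPROTO_UDP)
                return -1;
        udp = (const struct udp_hdr *)(l3 + ((ip->version_ihl & 0x0f) * 4));
        *src = rte_be_to_cpu_16(udp->src_port);
        *dst = rte_be_to_cpu_16(udp->dst_port);
        return 0;
}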
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_igb.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_nvm.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_manage.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_mac.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_82575.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_api.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_osdep.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_mbx.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += e1000_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += e1000_ethdev.c + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += lib/librte_net lib/librte_malloc + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_pmd_igb/e1000_ethdev.c b/lib/librte_pmd_igb/e1000_ethdev.c new file mode 100644 index 0000000000..a984428552 --- /dev/null +++ b/lib/librte_pmd_igb/e1000_ethdev.c @@ -0,0 +1,1319 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
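Before any of the driver code below runs, the application has to register the PMD and probe the PCI bus explicitly; in this release that is done from main(). A condensed sketch of the bring-up sequence (editorial, not part of this patch; the declaring headers and call order are assumed from the sample applications of this release, and show_ports is a hypothetical helper):

#include <stdio.h>
#include <rte_eal.h>
#include <rte_pci.h>
#include <rte_ethdev.h>

/* Hypothetical helper: register the igb PMD, probe PCI, report port count. */
static int
show_ports(int argc, char **argv)
{
        uint8_t nb_ports;

        if (rte_eal_init(argc, argv) < 0 || rte_igb_pmd_init() != 0 ||
            rte_eal_pci_probe() < 0)
                return -1;

        nb_ports = rte_eth_dev_count();
        printf("%u igb port(s) available\n", nb_ports);
        return nb_ports;
}

Once a port has been configured and started, its link state can be read back through rte_eth_link_get(), which ends up in eth_igb_link_update() and the atomic link-status helpers defined further down.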
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000_logs.h" +#include "igb/e1000_api.h" +#include "igb/e1000_hw.h" +#include "e1000_ethdev.h" + +static int eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, + uint16_t nb_tx_q); +static int eth_igb_start(struct rte_eth_dev *dev); +static void eth_igb_stop(struct rte_eth_dev *dev); +static void eth_igb_close(struct rte_eth_dev *dev); +static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev); +static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev); +static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev); +static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev); +static int eth_igb_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static void eth_igb_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *rte_stats); +static void eth_igb_stats_reset(struct rte_eth_dev *dev); +static void eth_igb_infos_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static int eth_igb_interrupt_setup(struct rte_eth_dev *dev); +static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev); +static int eth_igb_interrupt_action(struct rte_eth_dev *dev); +static void eth_igb_interrupt_handler(struct rte_intr_handle *handle, + void *param); +static int igb_hardware_init(struct e1000_hw *hw); +static void igb_hw_control_acquire(struct e1000_hw *hw); +static void igb_hw_control_release(struct e1000_hw *hw); +static void igb_init_manageability(struct e1000_hw *hw); +static void igb_release_manageability(struct e1000_hw *hw); +static void igb_vlan_hw_support_enable(struct rte_eth_dev *dev); +static void igb_vlan_hw_support_disable(struct rte_eth_dev *dev); +static void eth_igb_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, + int on); +static int eth_igb_led_on(struct rte_eth_dev *dev); +static int eth_igb_led_off(struct rte_eth_dev *dev); + +static void igb_intr_disable(struct e1000_hw *hw); +static int igb_get_rx_buffer_size(struct e1000_hw *hw); +static void eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index); + +#define IGB_FC_PAUSE_TIME 0x0680 +#define IGB_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */ +#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */ + +static enum e1000_fc_mode igb_fc_setting = e1000_fc_full; + +/* + * The set of PCI devices this driver supports + */ +static struct rte_pci_id pci_id_igb_map[] = { + +#undef RTE_LIBRTE_IXGBE_PMD +#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + +{.device_id = 0}, +}; + +static struct eth_dev_ops eth_igb_ops = { + .dev_configure = eth_igb_configure, + .dev_start = eth_igb_start, + .dev_stop = eth_igb_stop, + .dev_close = eth_igb_close, + .promiscuous_enable = eth_igb_promiscuous_enable, + .promiscuous_disable = eth_igb_promiscuous_disable, + .allmulticast_enable = eth_igb_allmulticast_enable, + .allmulticast_disable = eth_igb_allmulticast_disable, + .link_update = eth_igb_link_update, + .stats_get = eth_igb_stats_get, + .stats_reset = eth_igb_stats_reset, + .dev_infos_get = eth_igb_infos_get, + .vlan_filter_set = eth_igb_vlan_filter_set, + .rx_queue_setup = 
eth_igb_rx_queue_setup,
+ .tx_queue_setup = eth_igb_tx_queue_setup,
+ .dev_led_on = eth_igb_led_on,
+ .dev_led_off = eth_igb_led_off,
+ .flow_ctrl_set = eth_igb_flow_ctrl_set,
+ .mac_addr_add = eth_igb_rar_set,
+ .mac_addr_remove = eth_igb_rar_clear,
+};
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to read from.
+ * - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &(dev->data->dev_link);
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ * - Pointer to the structure rte_eth_dev to read from.
+ * - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &(dev->data->dev_link);
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+igb_identify_hardware(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->vendor_id = dev->pci_dev->id.vendor_id;
+ hw->device_id = dev->pci_dev->id.device_id;
+ hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+
+ e1000_set_mac_type(hw);
+
+ /* need to check if it is a vf device below */
+}
+
+static int
+eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+ struct rte_eth_dev *eth_dev)
+{
+ int error = 0;
+ struct rte_pci_device *pci_dev;
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct e1000_vfta * shadow_vfta =
+ E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+
+ pci_dev = eth_dev->pci_dev;
+ eth_dev->dev_ops = &eth_igb_ops;
+ eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+ eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (eth_dev->data->scattered_rx)
+ eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+ return 0;
+ }
+
+ hw->hw_addr= (void *)pci_dev->mem_resource.addr;
+
+ igb_identify_hardware(eth_dev);
+
+ if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
+ error = -EIO;
+ goto err_late;
+ }
+
+ e1000_get_bus_info(hw);
+
+ hw->mac.autoneg = 1;
+ hw->phy.autoneg_wait_to_complete = 0;
+ hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+
+ /* Copper options */
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ hw->phy.mdix = 0; /* AUTO_ALL_MODES */
+ hw->phy.disable_polarity_correction = 0;
+ hw->phy.ms_type = e1000_ms_hw_default;
+ }
+
+ /*
+ * Start from a known state, this is important in reading the nvm
+ * and mac from that.
+ */
+ e1000_reset_hw(hw);
+
+ /* Make sure we have a good EEPROM before we read from it */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ /*
+ * Some PCI-E parts fail the first check due to
+ * the link being in sleep state, call it again,
+ * if it fails a second time its a real issue.
+ */
+ if (e1000_validate_nvm_checksum(hw) < 0) {
+ PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
+ error = -EIO;
+ goto err_late;
+ }
+ }
+
+ /* Read the permanent MAC address out of the EEPROM */
+ if (e1000_read_mac_addr(hw) != 0) {
+ PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
+ error = -EIO;
+ goto err_late;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("e1000",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+ error = -ENOMEM;
+ goto err_late;
+ }
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
+
+ /* initialize the vfta */
+ memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+ /* Now initialize the hardware */
+ if (igb_hardware_init(hw) != 0) {
+ PMD_INIT_LOG(ERR, "Hardware initialization failed");
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ error = -ENODEV;
+ goto err_late;
+ }
+ hw->mac.get_link_status = 1;
+
+ /* Indicate SOL/IDER usage */
+ if (e1000_check_reset_block(hw) < 0) {
+ PMD_INIT_LOG(ERR, "PHY reset is blocked due to"
+ "SOL/IDER session");
+ }
+
+ PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ rte_intr_callback_register(&(pci_dev->intr_handle),
+ eth_igb_interrupt_handler, (void *)eth_dev);
+
+ return 0;
+
+err_late:
+ igb_hw_control_release(hw);
+
+ return (error);
+}
+
+static struct eth_driver rte_igb_pmd = {
+ {
+ .name = "rte_igb_pmd",
+ .id_table = pci_id_igb_map,
+ .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+ },
+ .eth_dev_init = eth_igb_dev_init,
+ .dev_private_size = sizeof(struct e1000_adapter),
+};
+
+int
+rte_igb_pmd_init(void)
+{
+ rte_eth_driver_register(&rte_igb_pmd);
+ return 0;
+}
+
+static int
+eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
+{
+ struct e1000_interrupt *intr =
+ E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ int diag;
+
+ PMD_INIT_LOG(DEBUG, ">>");
+
+ intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
+ /* Allocate the array of pointers to RX structures */
+ diag = igb_dev_rx_queue_alloc(dev, nb_rx_q);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
+ " pointers to RX queues failed",
+ dev->data->port_id, nb_rx_q);
+ return diag;
+ }
+
+ /* Allocate the array of pointers to TX structures */
+ diag = igb_dev_tx_queue_alloc(dev, nb_tx_q);
+ if (diag != 0) {
+ PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
+ " pointers to TX queues failed",
+ dev->data->port_id, nb_tx_q);
+
+ return diag;
+ }
+
+ PMD_INIT_LOG(DEBUG, "<<");
+
+ return (0);
+}
+
+static int
+eth_igb_start(struct rte_eth_dev *dev)
+{
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret, i;
+
+ PMD_INIT_LOG(DEBUG, ">>");
+
+ igb_intr_disable(hw);
+
+ /* Power up the phy.
Needed to make the link go Up */ + e1000_power_up_phy(hw); + + /* + * Packet Buffer Allocation (PBA) + * Writing PBA sets the receive portion of the buffer + * the remainder is used for the transmit buffer. + */ + if (hw->mac.type == e1000_82575) { + uint32_t pba; + + pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ + E1000_WRITE_REG(hw, E1000_PBA, pba); + } + + /* Put the address into the Receive Address Array */ + e1000_rar_set(hw, hw->mac.addr, 0); + + /* Initialize the hardware */ + if (igb_hardware_init(hw)) { + PMD_INIT_LOG(ERR, "Unable to initialize the hardware"); + return (-1); + } + + E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN); + + /* Configure for OS presence */ + igb_init_manageability(hw); + + eth_igb_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + ret = eth_igb_rx_init(dev); + if (ret) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware"); + return ret; + } + + e1000_clear_hw_cntrs_base_generic(hw); + + /* + * If VLAN filtering is enabled, set up VLAN tag offload and filtering + * and restore the VFTA. + */ + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + igb_vlan_hw_support_enable(dev); + else + igb_vlan_hw_support_disable(dev); + + /* + * Configure the Interrupt Moderation register (EITR) with the maximum + * possible value (0xFFFF) to minimize "System Partial Write" issued by + * spurious [DMA] memory updates of RX and TX ring descriptors. + * + * With a EITR granularity of 2 microseconds in the 82576, only 7/8 + * spurious memory updates per second should be expected. + * ((65535 * 2) / 1000.1000 ~= 0.131 second). + * + * Because interrupts are not used at all, the MSI-X is not activated + * and interrupt moderation is controlled by EITR[0]. + * + * Note that having [almost] disabled memory updates of RX and TX ring + * descriptors through the Interrupt Moderation mechanism, memory + * updates of ring descriptors are now moderated by the configurable + * value of Write-Back Threshold registers. 
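(Worked through, as an editorial aside that is not part of the patch: 0xFFFF EITR ticks at the 82576's 2 microsecond granularity are 65535 * 2 = 131070 us, about 0.131 s between moderated write-backs, or roughly 1/0.131 ~= 7.6 per second, which is the "7/8 ... per second" figure above; the "1000.1000" in the comment presumably stands for the 1000 * 1000 microseconds-per-second divisor.)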
+ */ + if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) || + (hw->mac.type == e1000_i350)) { + uint32_t ivar; + + /* Enable all RX & TX queues in the IVAR registers */ + ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID); + for (i = 0; i < 8; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar); + + /* Configure EITR with the maximum possible value (0xFFFF) */ + E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF); + } + + /* Don't reset the phy next time init gets called */ + hw->phy.reset_disable = 1; + + /* Setup link speed and duplex */ + switch (dev->data->dev_conf.link_speed) { + case ETH_LINK_SPEED_AUTONEG: + if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX; + else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX; + else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX; + else + goto error_invalid_config; + break; + case ETH_LINK_SPEED_10: + if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_10_SPEED; + else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX) + hw->phy.autoneg_advertised = ADVERTISE_10_HALF; + else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX) + hw->phy.autoneg_advertised = ADVERTISE_10_FULL; + else + goto error_invalid_config; + break; + case ETH_LINK_SPEED_100: + if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) + hw->phy.autoneg_advertised = E1000_ALL_100_SPEED; + else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX) + hw->phy.autoneg_advertised = ADVERTISE_100_HALF; + else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX) + hw->phy.autoneg_advertised = ADVERTISE_100_FULL; + else + goto error_invalid_config; + break; + case ETH_LINK_SPEED_1000: + if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) || + (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)) + hw->phy.autoneg_advertised = ADVERTISE_1000_FULL; + else + goto error_invalid_config; + break; + case ETH_LINK_SPEED_10000: + default: + goto error_invalid_config; + } + e1000_setup_link(hw); + + PMD_INIT_LOG(DEBUG, "<<"); + + /* check if lsc interrupt feature is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) + return eth_igb_interrupt_setup(dev); + + return (0); + +error_invalid_config: + PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n", + dev->data->dev_conf.link_speed, + dev->data->dev_conf.link_duplex, dev->data->port_id); + return -1; +} + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC. + * + **********************************************************************/ +static void +eth_igb_stop(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link; + + igb_intr_disable(hw); + e1000_reset_hw(hw); + E1000_WRITE_REG(hw, E1000_WUC, 0); + + /* Power down the phy. 
Needed to make the link go Down */ + e1000_power_down_phy(hw); + + igb_dev_clear_queues(dev); + + /* clear the recorded link status */ + memset(&link, 0, sizeof(link)); + rte_igb_dev_atomic_write_link_status(dev, &link); +} + +static void +eth_igb_close(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link; + + eth_igb_stop(dev); + e1000_phy_hw_reset(hw); + igb_release_manageability(hw); + igb_hw_control_release(hw); + + igb_dev_clear_queues(dev); + + memset(&link, 0, sizeof(link)); + rte_igb_dev_atomic_write_link_status(dev, &link); +} + +static int +igb_get_rx_buffer_size(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + if (hw->mac.type == e1000_82576) { + rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10; + } else if (hw->mac.type == e1000_82580) { + /* PBS needs to be translated according to a lookup table */ + rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf); + rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size); + rx_buf_size = (rx_buf_size << 10); + } else { + rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10; + } + + return rx_buf_size; +} + +/********************************************************************* + * + * Initialize the hardware + * + **********************************************************************/ +static int +igb_hardware_init(struct e1000_hw *hw) +{ + uint32_t rx_buf_size; + int diag; + + /* Let the firmware know the OS is in control */ + igb_hw_control_acquire(hw); + + /* + * These parameters control the automatic generation (Tx) and + * response (Rx) to Ethernet PAUSE frames. + * - High water mark should allow for at least two standard size (1518) + * frames to be received after sending an XOFF. + * - Low water mark works best when it is very near the high water mark. + * This allows the receiver to restart by sending XON when it has + * drained a bit. Here we use an arbitary value of 1500 which will + * restart after one full frame is pulled from the buffer. There + * could be several smaller frames in the buffer and if so they will + * not trigger the XON until their total number reduces the buffer + * by 1500. + * - The pause time is fairly large at 1000 x 512ns = 512 usec. 
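+ *
+ * (For example, with a 32 KB Rx packet buffer the code below yields
+ * high_water = 32768 - 2 * 1518 = 29732 bytes and
+ * low_water = 29732 - 1500 = 28232 bytes.)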
+ */ + rx_buf_size = igb_get_rx_buffer_size(hw); + + hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2); + hw->fc.low_water = hw->fc.high_water - 1500; + hw->fc.pause_time = IGB_FC_PAUSE_TIME; + hw->fc.send_xon = 1; + + /* Set Flow control, use the tunable location if sane */ + if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4)) + hw->fc.requested_mode = igb_fc_setting; + else + hw->fc.requested_mode = e1000_fc_none; + + /* Issue a global reset */ + e1000_reset_hw(hw); + E1000_WRITE_REG(hw, E1000_WUC, 0); + + diag = e1000_init_hw(hw); + if (diag < 0) + return (diag); + + E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN); + e1000_get_phy_info(hw); + e1000_check_for_link(hw); + + return (0); +} + +/* This function is based on igb_update_stats_counters() in igb/if_igb.c */ +static void +eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_hw_stats *stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + int pause_frames; + + if(hw->phy.media_type == e1000_media_type_copper || + (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + stats->symerrs += + E1000_READ_REG(hw,E1000_SYMERRS); + stats->sec += E1000_READ_REG(hw, E1000_SEC); + } + + stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); + stats->mpc += E1000_READ_REG(hw, E1000_MPC); + stats->scc += E1000_READ_REG(hw, E1000_SCC); + stats->ecol += E1000_READ_REG(hw, E1000_ECOL); + + stats->mcc += E1000_READ_REG(hw, E1000_MCC); + stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); + stats->colc += E1000_READ_REG(hw, E1000_COLC); + stats->dc += E1000_READ_REG(hw, E1000_DC); + stats->rlec += E1000_READ_REG(hw, E1000_RLEC); + stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); + stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); + /* + ** For watchdog management we need to know if we have been + ** paused during the last interval, so capture that here. + */ + pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC); + stats->xoffrxc += pause_frames; + stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); + stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); + stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); + stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); + stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); + stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); + stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); + stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); + stats->gprc += E1000_READ_REG(hw, E1000_GPRC); + stats->bprc += E1000_READ_REG(hw, E1000_BPRC); + stats->mprc += E1000_READ_REG(hw, E1000_MPRC); + stats->gptc += E1000_READ_REG(hw, E1000_GPTC); + + /* For the 64-bit byte counters the low dword must be read first. 
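+ * (GORCL before GORCH, GOTCL before GOTCH, as in the reads below.)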
*/ + /* Both registers clear on the read of the high dword */ + + stats->gorc += E1000_READ_REG(hw, E1000_GORCL); + stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32); + stats->gotc += E1000_READ_REG(hw, E1000_GOTCL); + stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32); + + stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); + stats->ruc += E1000_READ_REG(hw, E1000_RUC); + stats->rfc += E1000_READ_REG(hw, E1000_RFC); + stats->roc += E1000_READ_REG(hw, E1000_ROC); + stats->rjc += E1000_READ_REG(hw, E1000_RJC); + + stats->tor += E1000_READ_REG(hw, E1000_TORH); + stats->tot += E1000_READ_REG(hw, E1000_TOTH); + + stats->tpr += E1000_READ_REG(hw, E1000_TPR); + stats->tpt += E1000_READ_REG(hw, E1000_TPT); + stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); + stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); + stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); + stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); + stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); + stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); + stats->mptc += E1000_READ_REG(hw, E1000_MPTC); + stats->bptc += E1000_READ_REG(hw, E1000_BPTC); + + /* Interrupt Counts */ + + stats->iac += E1000_READ_REG(hw, E1000_IAC); + stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); + stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); + stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); + stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); + stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); + stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); + stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); + stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); + + /* Host to Card Statistics */ + + stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); + stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); + stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); + stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); + stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); + stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); + stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); + stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL); + stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32); + stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL); + stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32); + stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); + stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); + stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); + + stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); + stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); + stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); + stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); + stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); + + if (rte_stats == NULL) + return; + + /* Rx Errors */ + rte_stats->ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc + + stats->ruc + stats->roc + stats->mpc + stats->cexterr; + + /* Tx Errors */ + rte_stats->oerrors = stats->ecol + stats->latecol; + + rte_stats->ipackets = stats->gprc; + rte_stats->opackets = stats->gptc; + rte_stats->ibytes = stats->gorc; + rte_stats->obytes = stats->gotc; +} + +static void +eth_igb_stats_reset(struct rte_eth_dev *dev) +{ + struct e1000_hw_stats *hw_stats = + E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + eth_igb_stats_get(dev, NULL); + + /* Reset software totals */ + memset(hw_stats, 0, sizeof(*hw_stats)); +} + +static void +eth_igb_infos_get(struct 
rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */ + dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */ + dev_info->max_mac_addrs = hw->mac.rar_entry_count; + + switch (hw->mac.type) { + case e1000_82575: + dev_info->max_rx_queues = 4; + dev_info->max_tx_queues = 4; + break; + + case e1000_82576: + dev_info->max_rx_queues = 16; + dev_info->max_tx_queues = 16; + break; + + case e1000_82580: + dev_info->max_rx_queues = 8; + dev_info->max_tx_queues = 8; + break; + + case e1000_i350: + dev_info->max_rx_queues = 8; + dev_info->max_tx_queues = 8; + break; + + default: + /* Should not happen */ + dev_info->max_rx_queues = 0; + dev_info->max_tx_queues = 0; + } +} + +/* return 0 means link status changed, -1 means not changed */ +static int +eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link, old; + int link_check, count; + + link_check = 0; + hw->mac.get_link_status = 1; + + /* possible wait-to-complete in up to 9 seconds */ + for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) { + /* Read the real link status */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + /* Do the work to read phy */ + e1000_check_for_link(hw); + link_check = !hw->mac.get_link_status; + break; + + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_check = (E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_LU); + break; + + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_check = hw->mac.serdes_has_link; + break; + + default: + case e1000_media_type_unknown: + break; + } + if (link_check || wait_to_complete == 0) + break; + rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL); + } + memset(&link, 0, sizeof(link)); + rte_igb_dev_atomic_read_link_status(dev, &link); + old = link; + + /* Now we check if a transition has happened */ + if (link_check) { + hw->mac.ops.get_link_up_info(hw, &link.link_speed, + &link.link_duplex); + link.link_status = 1; + } else if (!link_check) { + link.link_speed = 0; + link.link_duplex = 0; + link.link_status = 0; + } + rte_igb_dev_atomic_write_link_status(dev, &link); + + /* not changed */ + if (old.link_status == link.link_status) + return -1; + + /* changed */ + return 0; +} + +/* + * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means + * that the driver is loaded. + */ +static void +igb_hw_control_acquire(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + + /* Let firmware know the driver has taken over */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +/* + * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + */ +static void +igb_hw_control_release(struct e1000_hw *hw) +{ + uint32_t ctrl_ext; + + /* Let firmware taken over control of h/w */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +/* + * Bit of a misnomer, what this really means is + * to enable OS management of the system... aka + * to disable special hardware management features. 
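+ *
+ * Concretely, when management pass-through is enabled, the code below
+ * clears MANC.ARP_EN (no more hardware interception of ARP) and sets
+ * MANC.EN_MNG2HOST plus the port 623/664 filters in MANC2H, so that
+ * management packets are passed up to the host.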
+ */ +static void +igb_init_manageability(struct e1000_hw *hw) +{ + if (e1000_enable_mng_pass_thru(hw)) { + uint32_t manc2h = E1000_READ_REG(hw, E1000_MANC2H); + uint32_t manc = E1000_READ_REG(hw, E1000_MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + /* enable receiving management packets to the host */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h |= 1 << 5; /* Mng Port 623 */ + manc2h |= 1 << 6; /* Mng Port 664 */ + E1000_WRITE_REG(hw, E1000_MANC2H, manc2h); + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +static void +igb_release_manageability(struct e1000_hw *hw) +{ + if (e1000_enable_mng_pass_thru(hw)) { + uint32_t manc = E1000_READ_REG(hw, E1000_MANC); + + manc |= E1000_MANC_ARP_EN; + manc &= ~E1000_MANC_EN_MNG2HOST; + + E1000_WRITE_REG(hw, E1000_MANC, manc); + } +} + +static void +eth_igb_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_igb_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= (~E1000_RCTL_UPE); + if (dev->data->all_multicast == 1) + rctl |= E1000_RCTL_MPE; + else + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_igb_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl |= E1000_RCTL_MPE; + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_igb_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t rctl; + + if (dev->data->promiscuous == 1) + return; /* must remain in all_multicast mode */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + rctl &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); +} + +static void +eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK); + vid_bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK)); + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, vid_idx); + if (on) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, vid_idx, vfta); + + /* update local VFTA copy */ + shadow_vfta->vfta[vid_idx] = vfta; +} + +static void +igb_vlan_hw_support_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_vfta * shadow_vfta = + E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t reg; + int i; + + /* VLAN Mode Enable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg |= E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); + + /* Filter Table Enable */ + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); + + /* Update maximum frame size */ + reg = E1000_READ_REG(hw, E1000_RLPML); + reg += VLAN_TAG_SIZE; + E1000_WRITE_REG(hw, E1000_RLPML, 
reg); + + /* restore VFTA table */ + for (i = 0; i < E1000_VFTA_SIZE; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]); +} + +static void +igb_vlan_hw_support_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t reg; + + /* VLAN Mode disable */ + reg = E1000_READ_REG(hw, E1000_CTRL); + reg &= ~E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); +} + +static void +igb_intr_disable(struct e1000_hw *hw) +{ + E1000_WRITE_REG(hw, E1000_IMC, ~0); + E1000_WRITE_FLUSH(hw); +} + +/** + * It enables the interrupt mask and then enable the interrupt. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_igb_interrupt_setup(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC); + E1000_WRITE_FLUSH(hw); + rte_intr_enable(&(dev->pci_dev->intr_handle)); + + return 0; +} + +/* + * It reads ICR and gets interrupt causes, check it and set a bit flag + * to update link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_igb_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t icr; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + /* read-on-clear nic registers here */ + icr = E1000_READ_REG(hw, E1000_ICR); + if (icr & E1000_ICR_LSC) { + intr->flags |= E1000_FLAG_NEED_LINK_UPDATE; + } + + return 0; +} + +/* + * It executes link_update after knowing an interrupt is prsent. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +eth_igb_interrupt_action(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + uint32_t tctl, rctl; + struct rte_eth_link link; + int ret; + + if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE)) + return -1; + + intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE; + rte_intr_enable(&(dev->pci_dev->intr_handle)); + + /* set get_link_status to check register later */ + hw->mac.get_link_status = 1; + ret = eth_igb_link_update(dev, 0); + + /* check if link has changed */ + if (ret < 0) + return 0; + + memset(&link, 0, sizeof(link)); + rte_igb_dev_atomic_read_link_status(dev, &link); + if (link.link_status) { + PMD_INIT_LOG(INFO, + " Port %d: Link Up - speed %u Mbps - %s\n", + dev->data->port_id, (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? 
+ "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down\n", + dev->data->port_id); + } + PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d", + dev->pci_dev->addr.domain, + dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, + dev->pci_dev->addr.function); + tctl = E1000_READ_REG(hw, E1000_TCTL); + rctl = E1000_READ_REG(hw, E1000_RCTL); + if (link.link_status) { + /* enable Tx/Rx */ + tctl |= E1000_TCTL_EN; + rctl |= E1000_RCTL_EN; + } else { + /* disable Tx/Rx */ + tctl &= ~E1000_TCTL_EN; + rctl &= ~E1000_RCTL_EN; + } + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + return 0; +} + +/** + * Interrupt handler which shall be registered at first. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +eth_igb_interrupt_handler(struct rte_intr_handle *handle, void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + eth_igb_interrupt_get_status(dev); + eth_igb_interrupt_action(dev); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); +} + +static int +eth_igb_led_on(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP); +} + +static int +eth_igb_led_off(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP); +} + +static int +eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct e1000_hw *hw; + int err; + enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = { + e1000_fc_none, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full + }; + uint32_t rx_buf_size; + uint32_t max_high_water; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rx_buf_size = igb_get_rx_buffer_size(hw); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size); + + /* At least reserve one Ethernet frame for watermark */ + max_high_water = rx_buf_size - ETHER_MAX_LEN; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n"); + PMD_INIT_LOG(ERR, "high water must <= 0x%x \n", max_high_water); + return (-EINVAL); + } + + hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode]; + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water = fc_conf->high_water; + hw->fc.low_water = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + + err = e1000_setup_link_generic(hw); + if (err == E1000_SUCCESS) { + return 0; + } + + PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err); + return (-EIO); +} + +static void +eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, __rte_unused uint32_t pool) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + e1000_rar_set(hw, mac_addr->addr_bytes, index); +} + +static void +eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index) +{ + uint8_t addr[ETHER_ADDR_LEN]; + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(addr, 0, sizeof(addr)); + + e1000_rar_set(hw, addr, index); +} diff --git a/lib/librte_pmd_igb/e1000_ethdev.h b/lib/librte_pmd_igb/e1000_ethdev.h new file mode 100644 index 0000000000..201866b110 --- /dev/null +++ 
b/lib/librte_pmd_igb/e1000_ethdev.h @@ -0,0 +1,117 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _E1000_ETHDEV_H_ +#define _E1000_ETHDEV_H_ + +/* need update link, bit flag */ +#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) + +/* + * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD + * driver. + */ +#define E1000_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */ +#define E1000_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */ +#define E1000_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE of Reserved */ +#define E1000_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */ +#define E1000_RXD_ERR_CKSUM_BIT 29 +#define E1000_RXD_ERR_CKSUM_MSK 3 +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */ + +#define E1000_VFTA_SIZE 128 + +/* structure for interrupt relative data */ +struct e1000_interrupt { + uint32_t flags; +}; + +/* local vfta copy */ +struct e1000_vfta { + uint32_t vfta[E1000_VFTA_SIZE]; +}; + +/* + * Structure to store private data for each driver instance (for each port). 
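+ * dev->data->dev_private points to one such e1000_adapter; the
+ * E1000_DEV_PRIVATE_TO_* macros below pick out its individual members.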
+ */ +struct e1000_adapter { + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_interrupt intr; + struct e1000_vfta shadow_vfta; +}; + +#define E1000_DEV_PRIVATE_TO_HW(adapter) \ + (&((struct e1000_adapter *)adapter)->hw) + +#define E1000_DEV_PRIVATE_TO_STATS(adapter) \ + (&((struct e1000_adapter *)adapter)->stats) + +#define E1000_DEV_PRIVATE_TO_INTR(adapter) \ + (&((struct e1000_adapter *)adapter)->intr) + +#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \ + (&((struct e1000_adapter *)adapter)->shadow_vfta) + +/* + * RX/TX function prototypes + */ +int igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues); +int igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues); +void igb_dev_clear_queues(struct rte_eth_dev *dev); + +int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +int eth_igb_rx_init(struct rte_eth_dev *dev); + +void eth_igb_tx_init(struct rte_eth_dev *dev); + +uint16_t eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +#endif /* _E1000_ETHDEV_H_ */ diff --git a/lib/librte_pmd_igb/e1000_logs.h b/lib/librte_pmd_igb/e1000_logs.h new file mode 100644 index 0000000000..e0c50b5f9d --- /dev/null +++ b/lib/librte_pmd_igb/e1000_logs.h @@ -0,0 +1,74 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _E1000_LOGS_H_ +#define _E1000_LOGS_H_ + +#ifdef RTE_LIBRTE_IGB_DEBUG_INIT +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_INIT_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IGB_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IGB_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IGB_DEBUG_TX_FREE +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IGB_DEBUG_DRIVER +#define PMD_DRV_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_DRV_LOG(level, fmt, args...) do { } while(0) +#endif + +#endif /* _E1000_LOGS_H_ */ diff --git a/lib/librte_pmd_igb/e1000_rxtx.c b/lib/librte_pmd_igb/e1000_rxtx.c new file mode 100644 index 0000000000..a891d12efe --- /dev/null +++ b/lib/librte_pmd_igb/e1000_rxtx.c @@ -0,0 +1,1859 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "e1000_logs.h" +#include "igb/e1000_api.h" +#include "e1000_ethdev.h" + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0); + return (m); +} + +#define RTE_MBUF_DATA_DMA_ADDR(mb) \ + (uint64_t) ((mb)->buf_physaddr + \ + (uint64_t) ((char *)((mb)->pkt.data) - \ + (char *)(mb)->buf_addr)) + +#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \ + (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM) + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct igb_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct igb_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct igb_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + struct igb_rx_entry *sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ +}; + +/** + * Hardware context number + */ +enum igb_advctx_num { + IGB_CTX_0 = 0, /**< CTX0 */ + IGB_CTX_1 = 1, /**< CTX1 */ + IGB_CTX_NUM = 2, /**< CTX NUM */ +}; + +/** + * Strucutre to check if new context need be built + */ +struct igb_advctx_info { + uint16_t flags; /**< ol_flags related to context build. */ + uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */ + uint32_t vlan_macip_lens; /**< vlan, mac.ip length. */ +}; + +/** + * Structure associated with each TX queue. + */ +struct igb_tx_queue { + volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */ + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */ + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint32_t txd_type; /**< Device-specific TXD type */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_tail; /**< Current value of TDT register. 
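+ (i.e. the index of the next free descriptor software will fill).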
*/ + uint16_t tx_head; /**< Index of first used TX descriptor. */ + uint16_t queue_id; /**< TX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold register. */ + uint32_t ctx_curr; /**< Current used hardware descriptor. */ + uint32_t ctx_start;/**< Start context position for transmit queue. */ + struct igb_advctx_info ctx_cache[IGB_CTX_NUM]; /**< Hardware context history.*/ +}; + +#if 1 +#define RTE_PMD_USE_PREFETCH +#endif + +#ifdef RTE_PMD_USE_PREFETCH +#define rte_igb_prefetch(p) rte_prefetch0(p) +#else +#define rte_igb_prefetch(p) do {} while(0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +/********************************************************************* + * + * TX function + * + **********************************************************************/ + +/* + * Advanced context descriptor are almost same between igb/ixgbe + * This is a separate function, looking for optimization opportunity here + * Rework required to go with the pre-defined values. + */ + +static inline void +igbe_set_xmit_ctx(struct igb_tx_queue* txq, + volatile struct e1000_adv_tx_context_desc *ctx_txd, + uint16_t ol_flags, uint32_t vlan_macip_lens) +{ + uint32_t type_tucmd_mlhl; + uint32_t mss_l4len_idx; + uint32_t ctx_idx, ctx_curr; + uint32_t cmp_mask; + + ctx_curr = txq->ctx_curr; + ctx_idx = ctx_curr + txq->ctx_start; + + cmp_mask = 0; + type_tucmd_mlhl = 0; + + if (ol_flags & PKT_TX_VLAN_PKT) { + cmp_mask |= TX_VLAN_CMP_MASK; + } + + if (ol_flags & PKT_TX_IP_CKSUM) { + type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4; + cmp_mask |= TX_MAC_LEN_CMP_MASK; + } + + /* Specify which HW CTX to upload. */ + mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT); + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + case PKT_TX_TCP_CKSUM: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + case PKT_TX_SCTP_CKSUM: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + default: + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV | + E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT; + break; + } + + txq->ctx_cache[ctx_curr].flags = ol_flags; + txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask; + txq->ctx_cache[ctx_curr].vlan_macip_lens = vlan_macip_lens & cmp_mask; + + ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl); + ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); + ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); + ctx_txd->seqnum_seed = 0; +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. 
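+ *
+ * Each TX queue caches IGB_CTX_NUM (2) hardware offload contexts. The
+ * function returns the index of the cached slot whose offload flags and
+ * (masked) vlan/mac/ip lengths match the request, or IGB_CTX_NUM when
+ * neither slot matches, in which case the caller must build a fresh
+ * context descriptor.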
+ */ +static inline uint32_t +what_advctx_update(struct igb_tx_queue *txq, uint16_t flags, + uint32_t vlan_macip_lens) +{ + /* If match with the current context */ + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens == + (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) { + return txq->ctx_curr; + } + + /* If match with the second context */ + txq->ctx_curr ^= 1; + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens == + (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) { + return txq->ctx_curr; + } + + /* Mismatch, use the previous context */ + return (IGB_CTX_NUM); +} + +static inline uint32_t +tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags) +{ + static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM}; + static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM}; + uint32_t tmp; + + tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM]; + tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0]; + return tmp; +} + +static inline uint32_t +tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags) +{ + static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE}; + return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0]; +} + +uint16_t +eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct igb_tx_entry *sw_ring; + struct igb_tx_entry *txe, *txn; + volatile union e1000_adv_tx_desc *txr; + volatile union e1000_adv_tx_desc *txd; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t olinfo_status; + uint32_t cmd_type_len; + uint32_t pkt_len; + uint16_t slen; + uint16_t ol_flags; + uint16_t tx_end; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint16_t tx_ol_req; + uint32_t new_ctx; + uint32_t ctx; + uint32_t vlan_macip_lens; + + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + tx_pkt = *tx_pkts++; + pkt_len = tx_pkt->pkt.pkt_len; + + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the VLAN Tag Identifier, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. + */ + tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1); + + ol_flags = tx_pkt->ol_flags; + vlan_macip_lens = (tx_pkt->pkt.vlan_tci << 16) | (tx_pkt->pkt.l2_len << E1000_ADVTXD_MACLEN_SHIFT) | tx_pkt->pkt.l3_len; + tx_ol_req = (ol_flags & PKT_TX_OFFLOAD_MASK); + + /* If a Context Descriptor need be built . */ + if (tx_ol_req) { + ctx = what_advctx_update(txq, tx_ol_req,vlan_macip_lens); + /* Only allocate context descriptor if required*/ + new_ctx = (ctx == IGB_CTX_NUM); + ctx = txq->ctx_curr; + tx_last = (uint16_t) (tx_last + new_ctx); + } + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u\n", + (unsigned) txq->port_id, + (unsigned) txq->queue_id, + (unsigned) pkt_len, + (unsigned) tx_id, + (unsigned) tx_last); + + /* + * Check if there are enough free descriptors in the TX ring + * to transmit the next packet. 
+ * This operation is based on the two following rules: + * + * 1- Only check that the last needed TX descriptor can be + * allocated (by construction, if that descriptor is free, + * all intermediate ones are also free). + * + * For this purpose, the index of the last TX descriptor + * used for a packet (the "last descriptor" of a packet) + * is recorded in the TX entries (the last one included) + * that are associated with all TX descriptors allocated + * for that packet. + * + * 2- Avoid to allocate the last free TX descriptor of the + * ring, in order to never set the TDT register with the + * same value stored in parallel by the NIC in the TDH + * register, which makes the TX engine of the NIC enter + * in a deadlock situation. + * + * By extension, avoid to allocate a free descriptor that + * belongs to the last set of free descriptors allocated + * to the same packet previously transmitted. + */ + + /* + * The "last descriptor" of the previously sent packet, if any, + * which used the last descriptor to allocate. + */ + tx_end = sw_ring[tx_last].last_id; + + /* + * The next descriptor following that "last descriptor" in the + * ring. + */ + tx_end = sw_ring[tx_end].next_id; + + /* + * The "last descriptor" associated with that next descriptor. + */ + tx_end = sw_ring[tx_end].last_id; + + /* + * Check that this descriptor is free. + */ + if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) { + if (nb_tx == 0) + return (0); + goto end_of_tx; + } + + /* + * Set common flags of all TX Data Descriptors. + * + * The following bits must be set in all Data Descriptors: + * - E1000_ADVTXD_DTYP_DATA + * - E1000_ADVTXD_DCMD_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - E1000_ADVTXD_DCMD_IFCS + * - E1000_ADVTXD_MAC_1588 + * - E1000_ADVTXD_DCMD_VLE + * + * The following bits must only be set in the last Data + * Descriptor: + * - E1000_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - E1000_TXD_CMD_RS + */ + cmd_type_len = txq->txd_type | + E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; + olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT); +#if defined(RTE_LIBRTE_IEEE1588) + if (ol_flags & PKT_TX_IEEE1588_TMST) + cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP; +#endif + if (tx_ol_req) { + /* Setup TX Advanced context descriptor if required */ + if (new_ctx) { + volatile struct e1000_adv_tx_context_desc * + ctx_txd; + + ctx_txd = (volatile struct + e1000_adv_tx_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, + vlan_macip_lens); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* Setup the TX Advanced Data Descriptor */ + cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags); + olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags); + olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT); + } + + m_seg = tx_pkt; + do { + txn = &sw_ring[txe->next_id]; + txd = &txr[tx_id]; + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = m_seg; + + /* + * Set up transmit descriptor. 
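+ * Each data descriptor carries the DMA address of its segment, the
+ * common command flags plus the segment length in cmd_type_len, and
+ * the offload/status information in olinfo_status.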
+ */ + slen = (uint16_t) m_seg->pkt.data_len; + buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg); + txd->read.buffer_addr = + rte_cpu_to_le_64(buf_dma_addr); + txd->read.cmd_type_len = + rte_cpu_to_le_32(cmd_type_len | slen); + txd->read.olinfo_status = + rte_cpu_to_le_32(olinfo_status); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->pkt.next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + * and Report Status (RS). + */ + txd->read.cmd_type_len |= + rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); + } + end_of_tx: + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT). + */ + E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id); + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + txq->tx_tail = tx_id; + + return (nb_tx); +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ +static inline uint16_t +rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs) +{ + uint16_t pkt_flags; + + static uint16_t ip_pkt_types_map[16] = { + 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT, + PKT_RX_IPV6_HDR, 0, 0, 0, + PKT_RX_IPV6_HDR_EXT, 0, 0, 0, + PKT_RX_IPV6_HDR_EXT, 0, 0, 0, + }; + +#if defined(RTE_LIBRTE_IEEE1588) + static uint32_t ip_pkt_etqf_map[8] = { + 0, 0, 0, PKT_RX_IEEE1588_PTP, + 0, 0, 0, 0, + }; + + pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? + ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] : + ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]; +#else + pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 : + ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]; +#endif + return pkt_flags | (uint16_t) (((hl_tp_rs & 0x0F) == 0) ? 0 : + PKT_RX_RSS_HASH); +} + +static inline uint16_t +rx_desc_status_to_pkt_flags(uint32_t rx_status) +{ + uint16_t pkt_flags; + + /* Check if VLAN present */ + pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0; + +#if defined(RTE_LIBRTE_IEEE1588) + if (rx_status & E1000_RXD_STAT_TMST) + pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST; +#endif + return pkt_flags; +} + +static inline uint16_t +rx_desc_error_to_pkt_flags(uint32_t rx_status) +{ + /* + * Bit 30: IPE, IPv4 checksum error + * Bit 29: L4I, L4I integrity error + */ + + static uint16_t error_to_pkt_flags_map[4] = { + 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD + }; + return error_to_pkt_flags_map[(rx_status >> + E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK]; +} + +uint16_t +eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile union e1000_adv_rx_desc *rx_ring; + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_entry *sw_ring; + struct igb_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union e1000_adv_rx_desc rxd; + uint64_t dma_addr; + uint32_t staterr; + uint32_t hlen_type_rss; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint16_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + while (nb_rx < nb_pkts) { + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. 
+ * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD))) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is + * likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy do not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x pkt_len=%u\n", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u\n", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_igb_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_igb_prefetch(&rx_ring[rx_id]); + rte_igb_prefetch(&sw_ring[rx_id]); + } + + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma_addr = + rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); + rxdp->read.hdr_addr = dma_addr; + rxdp->read.pkt_addr = dma_addr; + + /* + * Initialize the returned mbuf. + * 1) setup generic mbuf fields: + * - number of segments, + * - next segment, + * - packet length, + * - RX port identifier. + * 2) integrate hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. 
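+ *
+ * The offload flags are derived from the descriptor fields by the three
+ * rx_desc_*_to_pkt_flags() helpers defined above.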
+ */ + pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) - + rxq->crc_len); + rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM; + rte_packet_prefetch(rxm->pkt.data); + rxm->pkt.nb_segs = 1; + rxm->pkt.next = NULL; + rxm->pkt.pkt_len = pkt_len; + rxm->pkt.data_len = pkt_len; + rxm->pkt.in_port = rxq->port_id; + + rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss; + hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); + /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ + rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss); + pkt_flags = (pkt_flags | + rx_desc_status_to_pkt_flags(staterr)); + pkt_flags = (pkt_flags | + rx_desc_error_to_pkt_flags(staterr)); + rxm->ol_flags = pkt_flags; + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = rxm; + } + rxq->rx_tail = rx_id; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u\n", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return (nb_rx); +} + +uint16_t +eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile union e1000_adv_rx_desc *rx_ring; + volatile union e1000_adv_rx_desc *rxdp; + struct igb_rx_entry *sw_ring; + struct igb_rx_entry *rxe; + struct rte_mbuf *first_seg; + struct rte_mbuf *last_seg; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union e1000_adv_rx_desc rxd; + uint64_t dma; /* Physical address of mbuf data buffer */ + uint32_t staterr; + uint32_t hlen_type_rss; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint16_t data_len; + uint16_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + + /* + * Retrieve RX context of current packet, if any. + */ + first_seg = rxq->pkt_first_seg; + last_seg = rxq->pkt_last_seg; + + while (nb_rx < nb_pkts) { + next_desc: + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD))) + break; + rxd = *rxdp; + + /* + * Descriptor done. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. 
+ * + * - Stop parsing the RX ring and return immediately. + * + * This policy does not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x data_len=%u\n", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u\n", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_igb_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_igb_prefetch(&rx_ring[rx_id]); + rte_igb_prefetch(&sw_ring[rx_id]); + } + + /* + * Update RX descriptor with the physical address of the new + * data buffer of the new allocated mbuf. + */ + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); + rxdp->read.pkt_addr = dma; + rxdp->read.hdr_addr = dma; + + /* + * Set data length & data buffer address of mbuf. + */ + data_len = rte_le_to_cpu_16(rxd.wb.upper.length); + rxm->pkt.data_len = data_len; + rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt.pkt_len = data_len; + first_seg->pkt.nb_segs = 1; + } else { + first_seg->pkt.pkt_len += data_len; + first_seg->pkt.nb_segs++; + last_seg->pkt.next = rxm; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (! (staterr & E1000_RXD_STAT_EOP)) { + last_seg = rxm; + goto next_desc; + } + + /* + * This is the last buffer of the received packet. + * If the CRC is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. + * If part of the CRC is also contained in the previous + * mbuf, subtract the length of that CRC part from the + * data length of the previous mbuf. 
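+ *
+ * (For example, if the last buffer holds only 2 bytes, those bytes are
+ * pure CRC: the last mbuf is freed and the previous mbuf loses the
+ * remaining 4 - 2 = 2 CRC bytes from its data_len.)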
+ */ + rxm->pkt.next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt.pkt_len -= ETHER_CRC_LEN; + if (data_len <= ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->pkt.nb_segs--; + last_seg->pkt.data_len = (uint16_t) + (last_seg->pkt.data_len - + (ETHER_CRC_LEN - data_len)); + last_seg->pkt.next = NULL; + } else + rxm->pkt.data_len = + (uint16_t) (data_len - ETHER_CRC_LEN); + } + + /* + * Initialize the first mbuf of the returned packet: + * - RX port identifier, + * - hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + first_seg->pkt.in_port = rxq->port_id; + first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss; + + /* + * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is + * set in the pkt_flags field. + */ + first_seg->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); + hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss); + pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr)); + pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr)); + first_seg->ol_flags = pkt_flags; + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch(first_seg->pkt.data); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + + /* + * Setup receipt context for a new packet. + */ + first_seg = NULL; + } + + /* + * Record index of the next RX descriptor to probe. + */ + rxq->rx_tail = rx_id; + + /* + * Save receive context. + */ + rxq->pkt_first_seg = first_seg; + rxq->pkt_last_seg = last_seg; + + /* + * If the number of free RX descriptors is greater than the RX free + * threshold of the queue, advance the Receive Descriptor Tail (RDT) + * register. + * Update the RDT with the value of the last processed RX descriptor + * minus 1, to guarantee that the RDT register is never equal to the + * RDH register, which creates a "full" ring situtation from the + * hardware point of view... + */ + nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold); + if (nb_hold > rxq->rx_free_thresh) { + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u " + "nb_hold=%u nb_rx=%u\n", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) nb_hold, + (unsigned) nb_rx); + rx_id = (uint16_t) ((rx_id == 0) ? + (rxq->nb_rx_desc - 1) : (rx_id - 1)); + E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return (nb_rx); +} + +/* + * Rings setup and release. + * + * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be + * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. + * This will also optimize cache line size effect. + * H/W supports up to cache line size 128. + */ +#define IGB_ALIGN 128 + +/* + * Maximum number of Ring Descriptors. 
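A worked example of the CRC-trimming branch above, assuming ETHER_CRC_LEN is 4 and hardware CRC stripping is disabled:

/*
 * Example: a 1026-byte frame (including its 4-byte CRC) received into
 * 1024-byte buffers arrives as segments of 1024 and 2 bytes.  The 2-byte
 * tail holds nothing but part of the CRC, so it is freed and nb_segs drops
 * back to 1, while the 2 CRC bytes that spilled into it are trimmed from
 * the previous segment:
 *
 *   first_seg->pkt.pkt_len = (1024 + 2) - 4   = 1022
 *   last_seg->pkt.data_len = 1024 - (4 - 2)   = 1022
 */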
+ * + * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring + * desscriptors should meet the following condition: + * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0 + */ +#define IGB_MIN_RING_DESC 32 +#define IGB_MAX_RING_DESC 4096 + +static const struct rte_memzone * +ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, + uint16_t queue_id, uint32_t ring_size, int socket_id) +{ + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + + rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + dev->driver->pci_drv.name, ring_name, + dev->data->port_id, queue_id); + mz = rte_memzone_lookup(z_name); + if (mz) + return mz; + + return rte_memzone_reserve_aligned(z_name, (uint64_t)ring_size, + socket_id, 0, IGB_ALIGN); +} + +static void +igb_tx_queue_release_mbufs(struct igb_tx_queue *txq) +{ + unsigned i; + + if (txq->sw_ring != NULL) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +igb_tx_queue_release(struct igb_tx_queue *txq) +{ + igb_tx_queue_release_mbufs(txq); + rte_free(txq->sw_ring); + rte_free(txq); +} + +int +igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues) +{ + uint16_t i, old_nb_queues = dev->data->nb_tx_queues; + struct igb_tx_queue **txq; + + if (dev->data->tx_queues == NULL) { + dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", + sizeof(struct igb_tx_queue *) * nb_queues, + CACHE_LINE_SIZE); + if (dev->data->tx_queues == NULL) { + dev->data->nb_tx_queues = 0; + return -ENOMEM; + } + } else { + if (nb_queues < old_nb_queues) + for (i = nb_queues; i < old_nb_queues; i++) + igb_tx_queue_release(dev->data->tx_queues[i]); + + if (nb_queues != old_nb_queues) { + txq = rte_realloc(dev->data->tx_queues, + sizeof(struct igb_tx_queue *) * nb_queues, + CACHE_LINE_SIZE); + if (txq == NULL) + return -ENOMEM; + else + dev->data->tx_queues = txq; + if (nb_queues > old_nb_queues) + memset(&(txq[old_nb_queues]), 0, + sizeof(struct igb_tx_queue *) * + (nb_queues - old_nb_queues)); + } + } + dev->data->nb_tx_queues = nb_queues; + + return 0; +} + +static void +igb_reset_tx_queue_stat(struct igb_tx_queue *txq) +{ + txq->tx_head = 0; + txq->tx_tail = 0; + txq->ctx_curr = 0; + memset((void*)&txq->ctx_cache, 0, + IGB_CTX_NUM * sizeof(struct igb_advctx_info)); +} + +static void +igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev) +{ + struct igb_tx_entry *txe = txq->sw_ring; + uint32_t size; + uint16_t i, prev; + struct e1000_hw *hw; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc; + /* Zero out HW ring memory */ + for (i = 0; i < size; i++) { + ((volatile char *)txq->tx_ring)[i] = 0; + } + + /* Initialize ring entries */ + prev = txq->nb_tx_desc - 1; + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]); + + txd->wb.status = E1000_TXD_STAT_DD; + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->txd_type = E1000_ADVTXD_DTYP_DATA; + /* 82575 specific, each tx queue will use 2 hw contexts */ + if (hw->mac.type == e1000_82575) + txq->ctx_start = txq->queue_id * IGB_CTX_NUM; + + igb_reset_tx_queue_stat(txq); +} + +int +eth_igb_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + 
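Since each advanced descriptor is 16 bytes, the 128-byte RDLEN/TDLEN constraint amounts to requiring a multiple of 8 descriptors within the supported range. An illustrative check mirroring the validation performed in the queue setup functions below (the helper name is hypothetical):

static inline int
igb_nb_desc_is_valid(uint16_t nb_desc)
{
	/* Ring length must be a multiple of IGB_ALIGN, i.e. the descriptor
	 * count must be a multiple of 8, and within the supported range. */
	return ((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) == 0 &&
	       (nb_desc >= IGB_MIN_RING_DESC) && (nb_desc <= IGB_MAX_RING_DESC);
}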
struct igb_tx_queue *txq; + struct e1000_hw *hw; + uint32_t size; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum, and must be multiple + * of IGB_ALIGN. + */ + if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 || + (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) { + return -EINVAL; + } + + /* + * The tx_free_thresh and tx_rs_thresh values are not used in the 1G + * driver. + */ + if (tx_conf->tx_free_thresh != 0) + RTE_LOG(WARNING, PMD, + "The tx_free_thresh parameter is not " + "used for the 1G driver."); + if (tx_conf->tx_rs_thresh != 0) + RTE_LOG(WARNING, PMD, + "The tx_rs_thresh parameter is not " + "used for the 1G driver."); + if (tx_conf->tx_thresh.wthresh == 0) + RTE_LOG(WARNING, PMD, + "To improve 1G driver performance, consider setting " + "the TX WTHRESH value to 4, 8, or 16."); + + /* Free memory prior to re-allocation if needed */ + if (dev->data->tx_queues[queue_idx] != NULL) + igb_tx_queue_release(dev->data->tx_queues[queue_idx]); + + /* First allocate the tx queue data structure */ + txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue), + CACHE_LINE_SIZE); + if (txq == NULL) + return (-ENOMEM); + + /* + * Allocate TX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC; + tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, + size, socket_id); + if (tz == NULL) { + igb_tx_queue_release(txq); + return (-ENOMEM); + } + + txq->nb_tx_desc = nb_desc; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + + txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx)); + txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr; + txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr; + + size = sizeof(union e1000_adv_tx_desc) * nb_desc; + + /* Allocate software ring */ + txq->sw_ring = rte_zmalloc("txq->sw_ring", + sizeof(struct igb_tx_entry) * nb_desc, + CACHE_LINE_SIZE); + if (txq->sw_ring == NULL) { + igb_tx_queue_release(txq); + return (-ENOMEM); + } + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + igb_reset_tx_queue(txq, dev); + dev->tx_pkt_burst = eth_igb_xmit_pkts; + dev->data->tx_queues[queue_idx] = txq; + + return (0); +} + +static void +igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq) +{ + unsigned i; + + if (rxq->sw_ring != NULL) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +igb_rx_queue_release(struct igb_rx_queue *rxq) +{ + igb_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq); +} + +int +igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues) +{ + uint16_t i, old_nb_queues = dev->data->nb_rx_queues; + struct igb_rx_queue **rxq; + + if (dev->data->rx_queues == NULL) { + dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", + sizeof(struct igb_rx_queue *) * nb_queues, + CACHE_LINE_SIZE); + if (dev->data->rx_queues == NULL) { + dev->data->nb_rx_queues = 0; + return -ENOMEM; + } + } else { + for (i = nb_queues; i < old_nb_queues; i++) { + 
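From the application side, the threshold values warned about above arrive through struct rte_eth_txconf. A minimal sketch against the 1.2-era ethdev API; the port id, descriptor count, and pthresh/hthresh values are placeholders, and error handling is omitted:

#include <rte_ethdev.h>
#include <rte_lcore.h>

static const struct rte_eth_txconf example_tx_conf = {
	.tx_thresh = {
		.pthresh = 8,	/* placeholder prefetch threshold */
		.hthresh = 1,	/* placeholder host threshold */
		.wthresh = 16,	/* write-back threshold, per the warning above */
	},
	.tx_free_thresh = 0,	/* not used by the 1G driver */
	.tx_rs_thresh = 0,	/* not used by the 1G driver */
};

/* ret = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				&example_tx_conf); */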
igb_rx_queue_release(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + if (nb_queues != old_nb_queues) { + rxq = rte_realloc(dev->data->rx_queues, + sizeof(struct igb_rx_queue *) * nb_queues, + CACHE_LINE_SIZE); + if (rxq == NULL) + return -ENOMEM; + else + dev->data->rx_queues = rxq; + if (nb_queues > old_nb_queues) + memset(&(rxq[old_nb_queues]), 0, + sizeof(struct igb_rx_queue *) * + (nb_queues - old_nb_queues)); + } + } + dev->data->nb_rx_queues = nb_queues; + + return 0; +} + +static void +igb_reset_rx_queue(struct igb_rx_queue *rxq) +{ + unsigned size; + unsigned i; + + /* Zero out HW ring memory */ + size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc; + for (i = 0; i < size; i++) { + ((volatile char *)rxq->rx_ring)[i] = 0; + } + + rxq->rx_tail = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +int +eth_igb_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *rz; + struct igb_rx_queue *rxq; + struct e1000_hw *hw; + unsigned int size; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of receive descriptors. + * It must not exceed hardware maximum, and must be multiple + * of IGB_ALIGN. + */ + if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 || + (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) { + return (-EINVAL); + } + + /* Free memory prior to re-allocation if needed */ + if (dev->data->rx_queues[queue_idx] != NULL) { + igb_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* First allocate the RX queue data structure. */ + rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue), + CACHE_LINE_SIZE); + if (rxq == NULL) + return (-ENOMEM); + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->pthresh = rx_conf->rx_thresh.pthresh; + rxq->hthresh = rx_conf->rx_thresh.hthresh; + rxq->wthresh = rx_conf->rx_thresh.wthresh; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 : + ETHER_CRC_LEN); + + /* + * Allocate RX ring hardware descriptors. A memzone large enough to + * handle the maximum ring size is allocated in order to allow for + * resizing in later calls to the queue setup function. + */ + size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC; + rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id); + if (rz == NULL) { + igb_rx_queue_release(rxq); + return (-ENOMEM); + } + rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx)); + rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr; + rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr; + + /* Allocate software ring. 
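The RX counterpart additionally takes the mempool that feeds the descriptor ring and the rx_free_thresh that paces the RDT updates shown earlier. An illustrative call against the same 1.2-era API; threshold values, sizes, and the pool pointer are placeholders:

#include <rte_ethdev.h>
#include <rte_lcore.h>

static const struct rte_eth_rxconf example_rx_conf = {
	.rx_thresh = { .pthresh = 8, .hthresh = 8, .wthresh = 4 },
	.rx_free_thresh = 32,	/* rearm RDT only once more than 32 descriptors are held */
};

/* ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *				&example_rx_conf, pktmbuf_pool); */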
*/ + rxq->sw_ring = rte_zmalloc("rxq->sw_ring", + sizeof(struct igb_rx_entry) * nb_desc, + CACHE_LINE_SIZE); + if (rxq->sw_ring == NULL) { + igb_rx_queue_release(rxq); + return (-ENOMEM); + } + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", + rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr); + + dev->data->rx_queues[queue_idx] = rxq; + igb_reset_rx_queue(rxq); + + return 0; +} + +void +igb_dev_clear_queues(struct rte_eth_dev *dev) +{ + uint16_t i; + struct igb_tx_queue *txq; + struct igb_rx_queue *rxq; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + igb_tx_queue_release_mbufs(txq); + igb_reset_tx_queue(txq, dev); + } + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + igb_rx_queue_release_mbufs(rxq); + igb_reset_rx_queue(rxq); + } +} + +/** + * Receive Side Scaling (RSS). + * See section 7.1.1.7 in the following document: + * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009 + * + * Principles: + * The source and destination IP addresses of the IP header and the source and + * destination ports of TCP/UDP headers, if any, of received packets are hashed + * against a configurable random key to compute a 32-bit RSS hash result. + * The seven (7) LSBs of the 32-bit hash result are used as an index into a + * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit + * RSS output index which is used as the RX queue index where to store the + * received packets. + * The following output is supplied in the RX write-back descriptor: + * - 32-bit result of the Microsoft RSS hash function, + * - 4-bit RSS type field. + */ + +/* + * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet. + * Used as the default key. + */ +static uint8_t rss_intel_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, +}; + +static void +igb_rss_disable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + uint32_t mrqc; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mrqc = E1000_READ_REG(hw, E1000_MRQC); + mrqc &= ~E1000_MRQC_ENABLE_MASK; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); +} + +static void +igb_rss_configure(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + uint8_t *hash_key; + uint32_t rss_key; + uint32_t mrqc; + uint32_t shift; + uint16_t rss_hf; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + if (rss_hf == 0) /* Disable RSS. */ { + igb_rss_disable(dev); + return; + } + hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; + if (hash_key == NULL) + hash_key = rss_intel_key; /* Default hash key. */ + + /* Fill in RSS hash key. */ + for (i = 0; i < 10; i++) { + rss_key = hash_key[(i * 4)]; + rss_key |= hash_key[(i * 4) + 1] << 8; + rss_key |= hash_key[(i * 4) + 2] << 16; + rss_key |= hash_key[(i * 4) + 3] << 24; + E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key); + } + + /* Fill in redirection table. */ + shift = (hw->mac.type == e1000_82575) ? 6 : 0; + for (i = 0; i < 128; i++) { + union e1000_reta { + uint32_t dword; + uint8_t bytes[4]; + } reta; + uint8_t q_idx; + + q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ? 
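On the configuration side, the rss_key and rss_hf fields consumed by igb_rss_configure() come from the device configuration handed to the ethdev layer; note that this driver turns RSS on whenever more than one RX queue is configured. An illustrative fragment showing only the fields this code reads (the flag set is arbitrary):

#include <rte_ethdev.h>

static const struct rte_eth_conf example_port_conf = {
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,	/* NULL selects the default Intel key above */
			.rss_hf  = ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP,
		},
	},
};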
+ i % dev->data->nb_rx_queues : 0); + reta.bytes[i & 3] = (uint8_t) (q_idx << shift); + if ((i & 3) == 3) + E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword); + } + + /* Set configured hashing functions in MRQC register. */ + mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */ + if (rss_hf & ETH_RSS_IPV4) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4; + if (rss_hf & ETH_RSS_IPV4_TCP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP; + if (rss_hf & ETH_RSS_IPV6) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6; + if (rss_hf & ETH_RSS_IPV6_EX) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX; + if (rss_hf & ETH_RSS_IPV6_TCP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP; + if (rss_hf & ETH_RSS_IPV6_TCP_EX) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX; + if (rss_hf & ETH_RSS_IPV4_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP; + if (rss_hf & ETH_RSS_IPV6_UDP) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP; + if (rss_hf & ETH_RSS_IPV6_UDP_EX) + mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX; + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); +} + +/********************************************************************* + * + * Enable receive unit. + * + **********************************************************************/ + +static int +igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq) +{ + struct igb_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned i; + + /* Initialize software ring entries. */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union e1000_adv_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "RX mbuf alloc failed " + "queue_id=%hu\n", rxq->queue_id); + igb_rx_queue_release(rxq); + return (-ENOMEM); + } + dma_addr = + rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf)); + rxd = &rxq->rx_ring[i]; + rxd->read.hdr_addr = dma_addr; + rxd->read.pkt_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +int +eth_igb_rx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct igb_rx_queue *rxq; + struct rte_pktmbuf_pool_private *mbp_priv; + uint32_t rctl; + uint32_t rxcsum; + uint32_t srrctl; + uint16_t buf_size; + uint16_t rctl_bsize; + uint16_t i; + int ret; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + srrctl = 0; + + /* + * Make sure receives are disabled while setting + * up the descriptor ring. + */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + + /* + * Configure support of jumbo frames, if any. + */ + if (dev->data->dev_conf.rxmode.jumbo_frame == 1) { + rctl |= E1000_RCTL_LPE; + + /* Set maximum packet length. */ + E1000_WRITE_REG(hw, E1000_RLPML, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + } else + rctl &= ~E1000_RCTL_LPE; + + /* Configure and enable each RX queue. */ + rctl_bsize = 0; + dev->rx_pkt_burst = eth_igb_recv_pkts; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint64_t bus_addr; + uint32_t rxdctl; + + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings and set up queue */ + ret = igb_alloc_rx_queue_mbufs(rxq); + if (ret) { + igb_dev_clear_queues(dev); + return ret; + } + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure + */ + rxq->crc_len = + (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ? 
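A short worked example of the redirection-table fill completed just above:

/*
 * Example: with 4 RX queues on an 82576/82580/I350 (shift of 0), RETA entry
 * i holds queue (i % 4), so the 128 entries repeat the pattern 0,1,2,3; on
 * the 82575 the same queue index is shifted into bits 7:6 of each entry.
 * At receive time the 7 least significant bits of the 32-bit RSS hash pick
 * one of the 128 entries, which in turn selects the destination queue.
 */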
+ 0 : ETHER_CRC_LEN); + + bus_addr = rxq->rx_ring_phys_addr; + E1000_WRITE_REG(hw, E1000_RDLEN(i), + rxq->nb_rx_desc * + sizeof(union e1000_adv_rx_desc)); + E1000_WRITE_REG(hw, E1000_RDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr); + + srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* + * Configure RX buffer size. + */ + mbp_priv = (struct rte_pktmbuf_pool_private *) + ((char *)rxq->mb_pool + sizeof(struct rte_mempool)); + buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size - + RTE_PKTMBUF_HEADROOM); + if (buf_size >= 1024) { + /* + * Configure the BSIZEPACKET field of the SRRCTL + * register of the queue. + * Value is in 1 KB resolution, from 1 KB to 127 KB. + * If this field is equal to 0b, then RCTL.BSIZE + * determines the RX packet buffer size. + */ + srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) & + E1000_SRRCTL_BSIZEPKT_MASK); + buf_size = (uint16_t) ((srrctl & + E1000_SRRCTL_BSIZEPKT_MASK) << + E1000_SRRCTL_BSIZEPKT_SHIFT); + + if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){ + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + } else { + /* + * Use BSIZE field of the device RCTL register. + */ + if ((rctl_bsize == 0) || (rctl_bsize > buf_size)) + rctl_bsize = buf_size; + dev->rx_pkt_burst = eth_igb_recv_scattered_pkts; + dev->data->scattered_rx = 1; + } + + E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); + + /* Enable this RX queue. */ + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= (rxq->pthresh & 0x1F); + rxdctl |= ((rxq->hthresh & 0x1F) << 8); + rxdctl |= ((rxq->wthresh & 0x1F) << 16); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + } + + /* + * Setup BSIZE field of RCTL register, if needed. + * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL + * register, since the code above configures the SRRCTL register of + * the RX queue in such a case. + * All configurable sizes are: + * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX); + * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX); + * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX); + * 2048: rctl |= E1000_RCTL_SZ_2048; + * 1024: rctl |= E1000_RCTL_SZ_1024; + * 512: rctl |= E1000_RCTL_SZ_512; + * 256: rctl |= E1000_RCTL_SZ_256; + */ + if (rctl_bsize > 0) { + if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */ + rctl |= E1000_RCTL_SZ_512; + else /* 256 <= buf_size < 512 - use 256 */ + rctl |= E1000_RCTL_SZ_256; + } + + /* + * Configure RSS if device configured with multiple RX queues. + */ + if (dev->data->nb_rx_queues > 1) + igb_rss_configure(dev); + else + igb_rss_disable(dev); + + /* + * Setup the Checksum Register. + * Receive Full-Packet Checksum Offload is mutually exclusive with RSS. + */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + /* Enable both L3/L4 rx checksum offload */ + if (dev->data->dev_conf.rxmode.hw_ip_checksum) + rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL); + else + rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL); + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + + /* Setup the Receive Control Register. */ + if (dev->data->dev_conf.rxmode.hw_strip_crc) { + rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. 
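The buffer-size arithmetic above is easier to follow with concrete numbers; this example assumes a pool whose mbuf data room is 2048 bytes plus RTE_PKTMBUF_HEADROOM:

/*
 * Example: buf_size = 2048.  BSIZEPACKET is in 1 KB units (a shift of 10),
 * so 2048 >> 10 = 2 is written into SRRCTL and the effective buffer size
 * rounds back to 2048 bytes.  If max_rx_pkt_len exceeds that, the burst
 * function is switched to eth_igb_recv_scattered_pkts and packets are
 * chained across several mbufs as shown earlier.
 */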
*/ + + /* set STRCRC bit in all queues for Powerville */ + if (hw->mac.type == e1000_i350) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i)); + dvmolr |= E1000_DVMOLR_STRCRC; + E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr); + } + } + + } else { + rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */ + + /* clear STRCRC bit in all queues for Powerville */ + if (hw->mac.type == e1000_i350) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i)); + dvmolr &= ~E1000_DVMOLR_STRCRC; + E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr); + } + } + } + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Make sure VLAN Filters are off. */ + rctl &= ~E1000_RCTL_VFE; + /* Don't store bad packets. */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Receives. */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers. + * This needs to be done after enable. + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + E1000_WRITE_REG(hw, E1000_RDH(i), 0); + E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1); + } + + return 0; +} + +/********************************************************************* + * + * Enable transmit unit. + * + **********************************************************************/ +void +eth_igb_tx_init(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw; + struct igb_tx_queue *txq; + uint32_t tctl; + uint32_t txdctl; + uint16_t i; + + hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + uint64_t bus_addr; + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + + E1000_WRITE_REG(hw, E1000_TDLEN(i), + txq->nb_tx_desc * + sizeof(union e1000_adv_tx_desc)); + E1000_WRITE_REG(hw, E1000_TDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers. */ + E1000_WRITE_REG(hw, E1000_TDT(i), 0); + E1000_WRITE_REG(hw, E1000_TDH(i), 0); + + /* Setup Transmit threshold registers. */ + txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i)); + txdctl |= txq->pthresh & 0x1F; + txdctl |= ((txq->hthresh & 0x1F) << 8); + txdctl |= ((txq->wthresh & 0x1F) << 16); + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); + } + + /* Program the Transmit Control Register. */ + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); + + e1000_config_collision_dist(hw); + + /* This write will effectively turn on the transmit unit. */ + E1000_WRITE_REG(hw, E1000_TCTL, tctl); +} + diff --git a/lib/librte_pmd_igb/igb/README b/lib/librte_pmd_igb/igb/README new file mode 100644 index 0000000000..5a5658e9be --- /dev/null +++ b/lib/librte_pmd_igb/igb/README @@ -0,0 +1,74 @@ +.. + BSD LICENSE + + Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + version: DPDK.L.1.2.3-3 + +Intel® IGB driver +================= + +This directory contains code from the Intel® Network Adapter Driver for 82575/6 +and 82580-based Gigabit Network Connections under FreeBSD, version 2.2.3, +dated 04/25/2011. This code is available from +`http://downloadmirror.intel.com/15815/eng/igb-2.2.3.tar.gz` + +This driver is valid for the product(s) listed below + +* Intel® 82575EB Gigabit Ethernet Controller +* Intel® 82576 Gigabit Ethernet Controller +* Intel® 82580EB Gigabit Ethernet Controller +* Intel® Ethernet Controller I350 +* Intel® Ethernet Server Adapter I340-F4 +* Intel® Ethernet Server Adapter I340-T4 +* Intel® Ethernet Server Adapter I350-F2 +* Intel® Ethernet Server Adapter I350-F4 +* Intel® Ethernet Server Adapter I350-T2 +* Intel® Ethernet Server Adapter I350-T4 +* Intel® Gigabit EF Dual Port Server Adapter +* Intel® Gigabit ET Dual Port Server Adapter +* Intel® Gigabit ET Quad Port Server Adapter +* Intel® Gigabit ET2 Quad Port Server Adapter +* Intel® Gigabit VT Quad Port Server Adapter + + +Updating driver +=============== + +The following modifications have been made to this code to integrate it with the +Intel® DPDK: + + +e1000_osdep.h and e1000_osdep.c +------------------------------- + +The OS dependency layer has been extensively modified to support the drivers in +the Intel® DPDK environment. It is expected that these files will not need to be +changed on updating the driver. diff --git a/lib/librte_pmd_igb/igb/e1000_82575.c b/lib/librte_pmd_igb/igb/e1000_82575.c new file mode 100644 index 0000000000..b2f1fca79e --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_82575.c @@ -0,0 +1,2429 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. 
Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +/* + * 82575EB Gigabit Network Connection + * 82575EB Gigabit Backplane Connection + * 82575GB Gigabit Network Connection + * 82576 Gigabit Network Connection + * 82576 Quad Port Gigabit Mezzanine Adapter + */ + +#include "e1000_api.h" + +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw); +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw); +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw); +static void e1000_release_phy_82575(struct e1000_hw *hw); +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); +static void e1000_release_nvm_82575(struct e1000_hw *hw); +static s32 e1000_check_for_link_82575(struct e1000_hw *hw); +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_init_hw_82575(struct e1000_hw *hw); +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_reset_hw_82575(struct e1000_hw *hw); +static s32 e1000_reset_hw_82580(struct e1000_hw *hw); +static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 *data); +static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 data); +static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, + bool active); +static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, + bool active); +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, + bool active); +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); +static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); +static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, + u32 offset, u16 data); +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); +static void 
e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +static bool e1000_sgmii_active_82575(struct e1000_hw *hw); +static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); +static void e1000_config_collision_dist_82575(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); +static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); +static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); + +static const u16 e1000_82580_rxpbs_table[] = + { 36, 72, 144, 1, 2, 4, 8, 16, + 35, 70, 140 }; +#define E1000_82580_RXPBS_TABLE_SIZE \ + (sizeof(e1000_82580_rxpbs_table)/sizeof(u16)) + + +/** + * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = FALSE; + + DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = E1000_READ_REG(hw, E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + reg = E1000_READ_REG(hw, E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * e1000_init_phy_params_82575 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u32 ctrl_ext; + + DEBUGFUNC("e1000_init_phy_params_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + goto out; + } + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82575; + + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.acquire = e1000_acquire_phy_82575; + phy->ops.check_reset_block = e1000_check_reset_block_generic; + phy->ops.commit = e1000_phy_sw_reset_generic; + phy->ops.get_cfg_done = e1000_get_cfg_done_82575; + phy->ops.release = e1000_release_phy_82575; + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + + if (e1000_sgmii_active_82575(hw)) { + phy->ops.reset = e1000_phy_hw_reset_sgmii_82575; + ctrl_ext |= E1000_CTRL_I2C_ENA; + } else { + phy->ops.reset = e1000_phy_hw_reset_generic; + ctrl_ext &= ~E1000_CTRL_I2C_ENA; + } + + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + e1000_reset_mdicnfg_82580(hw); + + if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) { + phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575; + phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575; + } else if (hw->mac.type >= e1000_82580) { + phy->ops.read_reg = e1000_read_phy_reg_82580; + phy->ops.write_reg = e1000_write_phy_reg_82580; + } else { + phy->ops.read_reg = e1000_read_phy_reg_igp; + phy->ops.write_reg = e1000_write_phy_reg_igp; + } + + /* Set phy->phy_addr and phy->id. */ + ret_val = e1000_get_phy_id_82575(hw); + + /* Verify phy id and set remaining function pointers */ + switch (phy->id) { + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1111_I_PHY_ID: + phy->type = e1000_phy_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + if (phy->id == I347AT4_E_PHY_ID || + phy->id == M88E1112_E_PHY_ID || + phy->id == M88E1340M_E_PHY_ID) + phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; + else + phy->ops.get_cable_length = e1000_get_cable_length_m88; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + case IGP03E1000_E_PHY_ID: + case IGP04E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82575 - Init NVM func ptrs. 
+ * @hw: pointer to the HW structure + **/ +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82575"); + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + nvm->word_size = 1 << size; + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_82575; + nvm->ops.release = e1000_release_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = e1000_read_nvm_eerd; + else + nvm->ops.read = e1000_read_nvm_spi; + + nvm->ops.write = e1000_write_nvm_spi; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_82575; + + /* override genric family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = e1000_validate_nvm_checksum_82580; + nvm->ops.update = e1000_update_nvm_checksum_82580; + break; + case e1000_i350: + nvm->ops.validate = e1000_validate_nvm_checksum_i350; + nvm->ops.update = e1000_update_nvm_checksum_i350; + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + u32 ctrl_ext = 0; + + DEBUGFUNC("e1000_init_mac_params_82575"); + + /* Set media type */ + /* + * The 82575 uses bits 22:23 for link mode. The mode can be changed + * based on the EEPROM. We cannot rely upon device ID. There + * is no distinguishable difference between fiber and internal + * SerDes mode on the 82575. There can be an external PHY attached + * on the SGMII interface. For this, we'll set sgmii_active to TRUE. + */ + hw->phy.media_type = e1000_media_type_copper; + dev_spec->sgmii_active = FALSE; + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + dev_spec->sgmii_active = TRUE; + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + break; + default: + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 
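The EECD size decoding above is terse; a worked example, assuming NVM_WORD_SIZE_BASE_SHIFT has its usual value of 6 in this code base:

/*
 * Example: if the EECD SIZE_EX field reads 2, then size = 2 + 6 = 8 and
 * word_size = 1 << 8 = 256 sixteen-bit words (a 512-byte EEPROM).  Parts
 * whose word_size reaches 1 << 15 get page_size forced to 128 and are read
 * through the SPI path instead of EERD, as handled above.
 */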
0 : 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + if (mac->type == e1000_i350) { + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + /* Enable EEE default settings for i350 */ + dev_spec->eee_disable = FALSE; + } + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = TRUE; + /* FWSM register */ + mac->has_fwsm = TRUE; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = + (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK) + ? TRUE : FALSE; + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = e1000_reset_hw_82580; + else + mac->ops.reset_hw = e1000_reset_hw_82575; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_82575; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? e1000_setup_copper_link_82575 + : e1000_setup_serdes_link_82575; + /* physical interface shutdown */ + mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; + /* physical interface power up */ + mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_82575; + /* receive address register setting */ + mac->ops.rar_set = e1000_rar_set_generic; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82575; + /* configure collision distance */ + mac->ops.config_collision_dist = e1000_config_collision_dist_82575; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_82575; + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82575 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82575"); + + hw->mac.ops.init_params = e1000_init_mac_params_82575; + hw->nvm.ops.init_params = e1000_init_nvm_params_82575; + hw->phy.ops.init_params = e1000_init_phy_params_82575; + hw->mbx.ops.init_params = e1000_init_mbx_params_pf; +} + +/** + * e1000_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. 
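These init_params hooks are not called directly by the PMD; they are installed by e1000_init_function_pointers_82575() and later dispatched through the ops tables. A simplified, hypothetical sketch of that sequence (the real shared-code entry point may wrap it differently):

static s32 example_init_82575(struct e1000_hw *hw)
{
	e1000_init_function_pointers_82575(hw);	/* install the hooks above */

	if (hw->mac.ops.init_params(hw) != E1000_SUCCESS ||
	    hw->nvm.ops.init_params(hw) != E1000_SUCCESS ||
	    hw->phy.ops.init_params(hw) != E1000_SUCCESS)
		return -E1000_ERR_CONFIG;
	return E1000_SUCCESS;
}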
+ **/ +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + DEBUGFUNC("e1000_acquire_phy_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return e1000_acquire_swfw_sync_82575(hw, mask); +} + +/** + * e1000_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. + **/ +static void e1000_release_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + DEBUGFUNC("e1000_release_phy_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + e1000_release_swfw_sync_82575(hw, mask); +} + +/** + * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the serial gigabit media independent + * interface and stores the retrieved information in data. + **/ +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + DEBUGFUNC("e1000_read_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + DEBUGOUT1("PHY Address %u is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the serial gigabit + * media independent interface. + **/ +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 data) +{ + s32 ret_val = -E1000_ERR_PARAM; + + DEBUGFUNC("e1000_write_phy_reg_sgmii_82575"); + + if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_i2c(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_get_phy_id_82575 - Retrieve PHY addr and id + * @hw: pointer to the HW structure + * + * Retrieves the PHY address and ID for both PHY's which do and do not use + * sgmi interface. + **/ +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + u32 ctrl_ext; + u32 mdic; + + DEBUGFUNC("e1000_get_phy_id_82575"); + + /* + * For SGMII PHYs, we try the list of possible addresses until + * we find one that works. For non-SGMII PHYs + * (e.g. integrated copper PHYs), an address of 1 should + * work. The result of this function should mean phy->phy_addr + * and phy->id are set correctly. 
+ */ + if (!e1000_sgmii_active_82575(hw)) { + phy->addr = 1; + ret_val = e1000_get_phy_id(hw); + goto out; + } + + if (e1000_sgmii_uses_mdio_82575(hw)) { + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + mdic = E1000_READ_REG(hw, E1000_MDIC); + mdic &= E1000_MDIC_PHY_MASK; + phy->addr = mdic >> E1000_MDIC_PHY_SHIFT; + break; + case e1000_82580: + case e1000_i350: + mdic = E1000_READ_REG(hw, E1000_MDICNFG); + mdic &= E1000_MDICNFG_PHY_MASK; + phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + break; + } + ret_val = e1000_get_phy_id(hw); + goto out; + } + + /* Power on sgmii phy if it is disabled */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA); + E1000_WRITE_FLUSH(hw); + msec_delay(300); + + /* + * The address field in the I2CCMD register is 3 bits and 0 is invalid. + * Therefore, we need to test 1-7 + */ + for (phy->addr = 1; phy->addr < 8; phy->addr++) { + ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id); + if (ret_val == E1000_SUCCESS) { + DEBUGOUT2("Vendor ID 0x%08X read at address %u\n", + phy_id, + phy->addr); + /* + * At the time of this writing, The M88 part is + * the only supported SGMII PHY product. + */ + if (phy_id == M88_VENDOR) + break; + } else { + DEBUGOUT1("PHY address %u was unreadable\n", + phy->addr); + } + } + + /* A valid PHY type couldn't be found. */ + if (phy->addr == 8) { + phy->addr = 0; + ret_val = -E1000_ERR_PHY; + } else { + ret_val = e1000_get_phy_id(hw); + } + + /* restore previous sfp cage power state */ + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY using the serial gigabit media independent interface. + **/ +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575"); + + /* + * This isn't a TRUE "hard" reset, but is the only reset + * available to us at this time. + */ + + DEBUGOUT("Soft resetting SGMII attached PHY...\n"); + + if (!(hw->phy.ops.write_reg)) + goto out; + + /* + * SFP documentation requires the following to configure the SPF module + * to work on SGMII. No further documentation is given. + */ + ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.commit(hw); + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
+ **/ +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82575"); + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: TRUE to enable LPLU, FALSE to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + data |= E1000_82580_PM_SPD; + } else if (phy->smart_speed == e1000_smart_speed_off) { + data &= ~E1000_82580_PM_SPD; + } + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return ret_val; +} + +/** + * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is TRUE, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (!active) { + data &= ~E1000_82580_PM_D3_LPLU; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + data |= E1000_82580_PM_SPD; + } else if (phy->smart_speed == e1000_smart_speed_off) { + data &= ~E1000_82580_PM_SPD; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_82580_PM_D3_LPLU; + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } + + E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data); + return ret_val; +} + +/** + * e1000_acquire_nvm_82575 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
+ **/ +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_82575"); + + ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + if (ret_val) + goto out; + + /* + * Check if there is some access + * error this access may hook on + */ + if (hw->mac.type == e1000_i350) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT | + E1000_EECD_TIMEOUT)) { + /* Clear all access error flags */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_ERROR_CLR); + DEBUGOUT("Nvm bit banging access error" + " detected and cleared.\n"); + } + } + if (hw->mac.type == e1000_82580) { + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_BLOCKED) { + /* Clear access error flag */ + E1000_WRITE_REG(hw, E1000_EECD, eecd | + E1000_EECD_BLOCKED); + DEBUGOUT("Nvm bit banging access" + " error detected and cleared.\n"); + } + } + + ret_val = e1000_acquire_nvm_generic(hw); + if (ret_val) + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); + +out: + return ret_val; +} + +/** + * e1000_release_nvm_82575 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void e1000_release_nvm_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_82575"); + + e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + DEBUGFUNC("e1000_acquire_swfw_sync_82575"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_generic(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_82575 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. 
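+ *
+ * Hedged pairing example (illustrative only): acquire and release must use
+ * the same mask, e.g. around an EEPROM access
+ *
+ *   if (e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM) == E1000_SUCCESS) {
+ *           ... touch the shared EEPROM resource ...
+ *           e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+ *   }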
+ **/ +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_82575"); + + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS); + /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + s32 ret_val = E1000_SUCCESS; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) + DEBUGOUT("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) && + (hw->phy.type == e1000_phy_igp_3)) + e1000_phy_init_script_igp3(hw); + + return ret_val; +} + +/** + * e1000_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + + return ret_val; +} + +/** + * e1000_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +static s32 e1000_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + DEBUGFUNC("e1000_check_for_link_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* + * Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. 
+ */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + } else { + ret_val = e1000_check_for_copper_link_generic(hw); + } + + return ret_val; +} + +/** + * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_power_up_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); +} + +/** + * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs; + + DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); + + /* Set up defaults for the return values of this function */ + mac->serdes_has_link = FALSE; + *speed = 0; + *duplex = 0; + + /* + * Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + /* + * The link up bit determines when link is up on autoneg. The sync ok + * gets set once both sides sync up and agree upon link. Stable link + * can be determined by checking for both link up and link sync ok + */ + if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) { + mac->serdes_has_link = TRUE; + + /* Detect and store PCS speed */ + if (pcs & E1000_PCS_LSTS_SPEED_1000) { + *speed = SPEED_1000; + } else if (pcs & E1000_PCS_LSTS_SPEED_100) { + *speed = SPEED_100; + } else { + *speed = SPEED_10; + } + + /* Detect and store PCS duplex */ + if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) { + *duplex = FULL_DUPLEX; + } else { + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_shutdown_serdes_link_82575 - Remove link during power down + * @hw: pointer to the HW structure + * + * In the case of serdes shut down sfp and PCS on driver unload + * when management pass thru is not enabled. 
+ **/ +void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_shutdown_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + if (!e1000_enable_mng_pass_thru(hw)) { + /* Disable PCS to turn off link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg &= ~E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* shutdown the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg |= E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); + } + + return; +} + +/** + * e1000_reset_hw_82575 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +static s32 e1000_reset_hw_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_reset_hw_82575"); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) { + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + } + + /* set the completion timeout for interface */ + ret_val = e1000_set_pcie_completion_timeout(hw); + if (ret_val) { + DEBUGOUT("PCI-E Set completion timeout has failed.\n"); + } + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + DEBUGOUT("Issuing a global reset to MAC\n"); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) + e1000_reset_init_script_82575(hw); + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + return ret_val; +} + +/** + * e1000_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
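+ *
+ * Hedged note (not from the original sources): this routine is normally
+ * installed as the mac.ops.init_hw entry point and is expected to run once
+ * after a successful reset, roughly
+ *
+ *   hw->mac.ops.reset_hw(hw);
+ *   ret_val = hw->mac.ops.init_hw(hw);
+ *
+ * with the exact call site living in the higher-level driver init path.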
+ **/ +static s32 e1000_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + DEBUGFUNC("e1000_init_hw_82575"); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address */ + e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + DEBUGOUT("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82575(hw); + + return ret_val; +} + +/** + * e1000_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_copper_link_82575"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + ret_val = e1000_setup_serdes_link_82575(hw); + if (ret_val) + goto out; + + if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) { + /* allow time for SFP cage time to power up phy */ + msec_delay(300); + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + } + switch (hw->phy.type) { + case e1000_phy_m88: + if (hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID) + ret_val = e1000_copper_link_setup_m88_gen2(hw); + else + ret_val = e1000_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_3: + ret_val = e1000_copper_link_setup_igp(hw); + break; + case e1000_phy_82580: + ret_val = e1000_copper_link_setup_82577(hw); + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + goto out; + + ret_val = e1000_setup_copper_link_generic(hw); +out: + return ret_val; +} + +/** + * e1000_setup_serdes_link_82575 - Setup link for serdes + * @hw: pointer to the HW structure + * + * Configure the physical coding sub-layer (PCS) link. The PCS link is + * used on copper connections where the serialized gigabit media independent + * interface (sgmii), or serdes fiber is being used. Configures the link + * for auto-negotiation or forces speed/duplex. 
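+ *
+ * Informal summary of the selection logic below: pcs_autoneg starts out as
+ * hw->mac.autoneg, is forced TRUE for SGMII link mode (the PHY handles
+ * speed/duplex), forced FALSE for 1000BASE-KX (parallel detect only), and
+ * in the remaining serdes modes the MAC is forced to 1000/Full while the
+ * PCS either autonegotiates or is forced, with flow control always forced
+ * so autoneg results do not overwrite the CTRL settings.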
+ **/ +static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw) +{ + u32 ctrl_ext, ctrl_reg, reg; + bool pcs_autoneg; + + DEBUGFUNC("e1000_setup_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return E1000_SUCCESS; + + /* + * On the 82575, SerDes loopback mode persists until it is + * explicitly turned off or a power cycle is performed. A read to + * the register does not indicate its status. Therefore, we ensure + * loopback mode is disabled during initialization. + */ + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + /* set both sw defined pins on 82575/82576*/ + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = TRUE; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = FALSE; + /* fall through to default case */ + default: + /* + * non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + + /* + * New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. + */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + /* + * We force flow control to prevent the CTRL register values from being + * overwritten by the autonegotiated flow control values + */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + + if (!e1000_sgmii_active_82575(hw)) + e1000_force_mac_fc_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_valid_led_default_82575 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
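+ *
+ * Hedged note: a value of ID_LED_RESERVED_0000 or ID_LED_RESERVED_FFFF is
+ * treated as unprogrammed and replaced with ID_LED_DEFAULT_82575_SERDES for
+ * internal serdes media or ID_LED_DEFAULT otherwise; any NVM read error is
+ * returned to the caller unchanged.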
+ **/ +static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_82575"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch(hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_82575_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT; + break; + } + } +out: + return ret_val; +} + +/** + * e1000_sgmii_active_82575 - Return sgmii state + * @hw: pointer to the HW structure + * + * 82575 silicon has a serialized gigabit media independent interface (sgmii) + * which can be enabled for use in the embedded applications. Simply + * return the current state of the sgmii interface. + **/ +static bool e1000_sgmii_active_82575(struct e1000_hw *hw) +{ + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + return dev_spec->sgmii_active; +} + +/** + * e1000_reset_init_script_82575 - Inits HW defaults after reset + * @hw: pointer to the HW structure + * + * Inits recommended HW defaults after a reset when there is no EEPROM + * detected. This is only for the 82575. + **/ +static s32 e1000_reset_init_script_82575(struct e1000_hw* hw) +{ + DEBUGFUNC("e1000_reset_init_script_82575"); + + if (hw->mac.type == e1000_82575) { + DEBUGOUT("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_mac_addr_82575"); + + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_config_collision_dist_82575 - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. 
+ **/ +static void e1000_config_collision_dist_82575(struct e1000_hw *hw) +{ + u32 tctl_ext; + + DEBUGFUNC("e1000_config_collision_dist_82575"); + + tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); + + tctl_ext &= ~E1000_TCTL_EXT_COLD; + tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + + if (!(phy->ops.check_reset_block)) + return; + + /* If the management interface is not enabled, then power down */ + if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82575"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); + + E1000_READ_REG(hw, E1000_CBTMPC); + E1000_READ_REG(hw, E1000_HTDPMC); + E1000_READ_REG(hw, E1000_CBRMPC); + E1000_READ_REG(hw, E1000_RPTHC); + E1000_READ_REG(hw, E1000_HGPTC); + E1000_READ_REG(hw, E1000_HTCBDPC); + E1000_READ_REG(hw, E1000_HGORCL); + E1000_READ_REG(hw, E1000_HGORCH); + E1000_READ_REG(hw, E1000_HGOTCL); + E1000_READ_REG(hw, E1000_HGOTCH); + E1000_READ_REG(hw, E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) || + e1000_sgmii_active_82575(hw)) + E1000_READ_REG(hw, E1000_SCVPC); +} + +/** + * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After rx enable if managability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. 
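+ *
+ * Informal outline of the workaround below: disable all four Rx queues and
+ * poll up to 10 ms for them to quiesce, then temporarily reject all frames
+ * (clear RFCTL.LEF and RCTL.SBP, set RLPML to 0 and RCTL.LPE) while
+ * toggling RCTL.EN to drain the FIFO, restore the saved RXDCTL/RCTL/RLPML/
+ * RFCTL values, and finally read ROC/RNBC/MPC to discard the error counts
+ * the flush itself generated.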
+ **/ +void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + DEBUGFUNC("e1000_rx_fifo_workaround_82575"); + if (hw->mac.type != e1000_82575 || + !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + msec_delay(1); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + DEBUGOUT("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. Set enable and wait 2ms so that + * any packet that was coming in as RCTL.EN was set is flushed + */ + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); + + rlpml = E1000_READ_REG(hw, E1000_RLPML); + E1000_WRITE_REG(hw, E1000_RLPML, 0); + + rctl = E1000_READ_REG(hw, E1000_RCTL); + temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP); + temp_rctl |= E1000_RCTL_LPE; + + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl); + E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN); + E1000_WRITE_FLUSH(hw); + msec_delay(2); + + /* Enable Rx queues that were previously enabled and restore our + * previous state + */ + for (i = 0; i < 4; i++) + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]); + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + E1000_WRITE_FLUSH(hw); + + E1000_WRITE_REG(hw, E1000_RLPML, rlpml); + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + /* Flush receive errors generated by workaround */ + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_MPC); +} + +/** + * e1000_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82575 and 82576 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 200ms for capability version 1 config, + * or 16ms to 55ms for version 2. 
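+ *
+ * Informal summary of the logic below: nothing is changed when GCR already
+ * reports a non-zero completion timeout; for capability version 1 the
+ * 10ms-200ms range is selected directly through GCR, for version 2 the
+ * 16ms-55ms range is written to PCIE_DEVICE_CONTROL2 in config space, and
+ * in every case completion timeout resend is disabled before returning.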
+ **/ +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw) +{ + u32 gcr = E1000_READ_REG(hw, E1000_GCR); + s32 ret_val = E1000_SUCCESS; + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & E1000_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 200ms through the GCR register + */ + if (!(gcr & E1000_GCR_CAP_VER2)) { + gcr |= E1000_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); + if (ret_val) + goto out; + + pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms; + + ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2, + &pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND; + + E1000_WRITE_REG(hw, E1000_GCR, gcr); + return ret_val; +} + +/** + * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * @pf: Physical Function pool - do not set anti-spoofing for the PF + * + * enables/disables L2 switch anti-spoofing functionality. + **/ +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + if (enable) { + dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs */ + dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); + break; + case e1000_i350: + dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) { + dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); + default: + break; + } +} + +/** + * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. + **/ +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); + break; + case e1000_i350: + dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + + +} + +/** + * e1000_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. 
+ **/ +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); +} + +/** + * e1000_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits + * @hw: pointer to the HW structure + * + * This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on + * the values found in the EEPROM. This addresses an issue in which these + * bits are not restored from EEPROM after reset. + **/ +static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 mdicnfg; + u16 nvm_data = 0; + + DEBUGFUNC("e1000_reset_mdicnfg_82580"); + + if (hw->mac.type != e1000_82580) + goto out; + if (!e1000_sgmii_active_82575(hw)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + if (nvm_data & NVM_WORD24_EXT_MDIO) + mdicnfg |= E1000_MDICNFG_EXT_MDIO; + if (nvm_data & NVM_WORD24_COM_MDIO) + mdicnfg |= E1000_MDICNFG_COM_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); +out: + return ret_val; +} + +/** + * e1000_reset_hw_82580 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets function or entire device (all ports, etc.) + * to a known state. + **/ +static s32 e1000_reset_hw_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + /* BH SW mailbox bit in SW_FW_SYNC */ + u16 swmbsw_mask = E1000_SW_SYNCH_MB; + u32 ctrl; + bool global_device_reset = hw->dev_spec._82575.global_device_reset; + + DEBUGFUNC("e1000_reset_hw_82580"); + + hw->dev_spec._82575.global_device_reset = FALSE; + + /* Get current control state. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* + * Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. 
+ */ + ret_val = e1000_disable_pcie_master_generic(hw); + if (ret_val) + DEBUGOUT("PCI-E Master disable polling has failed.\n"); + + DEBUGOUT("Masking off all interrupts\n"); + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_WRITE_REG(hw, E1000_RCTL, 0); + E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(hw); + + msec_delay(10); + + /* Determine whether or not a global dev reset is requested */ + if (global_device_reset && + e1000_acquire_swfw_sync_82575(hw, swmbsw_mask)) + global_device_reset = FALSE; + + if (global_device_reset && + !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET)) + ctrl |= E1000_CTRL_DEV_RST; + else + ctrl |= E1000_CTRL_RST; + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Add delay to insure DEV_RST has time to complete */ + if (global_device_reset) + msec_delay(5); + + ret_val = e1000_get_auto_rd_done_generic(hw); + if (ret_val) { + /* + * When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* If EEPROM is not present, run manual init scripts */ + if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) + e1000_reset_init_script_82575(hw); + + /* clear global device reset status bit */ + E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + ret_val = e1000_reset_mdicnfg_82580(hw); + if (ret_val) + DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + /* Release semaphore */ + if (global_device_reset) + e1000_release_swfw_sync_82575(hw, swmbsw_mask); + + return ret_val; +} + +/** + * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 e1000_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < E1000_82580_RXPBS_TABLE_SIZE) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
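+ *
+ * Worked example (hedged, values invented for illustration): if the data
+ * words at [offset .. offset + NVM_CHECKSUM_REG - 1] sum to 0xA0B0, the
+ * checksum word stored at (offset + NVM_CHECKSUM_REG) must be
+ * 0xBABA - 0xA0B0 = 0x1A0A so that the 16-bit sum over all
+ * NVM_CHECKSUM_REG + 1 words equals NVM_SUM (0xBABA).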
+ **/ +s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 eeprom_regions_count = 1; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) { + /* if chekcsums compatibility bit is set validate checksums + * for all 4 ports. */ + eeprom_regions_count = 4; + } + + for (j = 0; j < eeprom_regions_count; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82580 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. 
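+ *
+ * Hedged note: each of the four LAN functions owns its own checksum region
+ * located via NVM_82580_LAN_FUNC_OFFSET(port), so the loop below simply
+ * reruns e1000_update_nvm_checksum_with_offset() once per port after
+ * ensuring the compatibility bit in NVM_COMPATIBILITY_REG_3 is set.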
+ **/ +static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw) +{ + s32 ret_val; + u16 j, nvm_data; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_82580"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum" + " compatibility bit.\n"); + goto out; + } + + if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) { + /* set compatibility bit to validate checksums appropriately */ + nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK; + ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1, + &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Write Error while updating checksum" + " compatibility bit.\n"); + goto out; + } + } + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val) { + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_validate_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_validate_nvm_checksum_with_offset(hw, + nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_i350 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM section checksums for all 4 ports by reading/adding + * each word of the EEPROM up to the checksum. Then calculates the EEPROM + * checksum and writes the value to the EEPROM. + **/ +static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 j; + u16 nvm_offset; + + DEBUGFUNC("e1000_update_nvm_checksum_i350"); + + for (j = 0; j < 4; j++) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j); + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset); + if (ret_val != E1000_SUCCESS) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_set_eee_i350 - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure. 
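+ *
+ * Usage sketch (illustrative only): a caller wanting EEE on an i350 copper
+ * port would typically do
+ *
+ *   hw->dev_spec._82575.eee_disable = FALSE;
+ *   ret_val = e1000_set_eee_i350(hw);
+ *
+ * and set eee_disable to TRUE before calling again to turn EEE off; on
+ * non-i350 parts or non-copper link modes the call is a no-op.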
+ * + **/ +s32 e1000_set_eee_i350(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u32 ipcnfg, eeer, ctrl_ext; + + DEBUGFUNC("e1000_set_eee_i350"); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + if ((hw->mac.type != e1000_i350) || + (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK)) + goto out; + ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG); + eeer = E1000_READ_REG(hw, E1000_EEER); + + /* enable or disable per user setting */ + if (!(hw->dev_spec._82575.eee_disable)) { + ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer |= (E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + + } else { + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | + E1000_IPCNFG_EEE_100M_AN); + eeer &= ~(E1000_EEER_TX_LPI_EN | + E1000_EEER_RX_LPI_EN | + E1000_EEER_LPI_FC); + } + E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg); + E1000_WRITE_REG(hw, E1000_EEER, eeer); + E1000_READ_REG(hw, E1000_IPCNFG); + E1000_READ_REG(hw, E1000_EEER); +out: + + return ret_val; +} diff --git a/lib/librte_pmd_igb/igb/e1000_82575.h b/lib/librte_pmd_igb/igb/e1000_82575.h new file mode 100644 index 0000000000..415756e4d6 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_82575.h @@ -0,0 +1,487 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. + */ +/* + * For 82576, there are an additional set of RARs that begin at an offset + * separate from the first set of RARs. 
+ */ +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +#ifdef E1000_BIT_FIELDS +struct e1000_adv_data_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u32 datalen :16; /* Data buffer length */ + u32 rsvd :4; + u32 dtyp :4; /* Descriptor type */ + u32 dcmd :8; /* Descriptor command */ + } config; + } lower; + union { + u32 data; + struct { + u32 status :4; /* Descriptor status */ + u32 idx :4; + u32 popts :6; /* Packet Options */ + u32 paylen :18; /* Payload length */ + } options; + } upper; +}; + +#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ +#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ +#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ +#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ +#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ +#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ +#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ +#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ +#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ +#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ +/* Extended Device Control */ +#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ + +struct e1000_adv_context_desc { + union { + u32 ip_config; + struct { + u32 iplen :9; + u32 maclen :7; + u32 vlan_tag :16; + } fields; + } ip_setup; + u32 seq_num; + union { + u64 l4_config; + struct { + u32 mkrloc :9; + u32 tucmd :11; + u32 dtyp :4; + u32 adv :8; + u32 rsvd :4; + u32 idx :4; + u32 l4len :8; + u32 mss :16; + } fields; + } l4_setup; +}; +#endif + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define E1000_TX_HEAD_WB_ENABLE 0x1 +#define E1000_TX_SEQNUM_WB_ENABLE 0x2 + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 + +#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 +#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << E1000_VMRCTL_MIRROR_PORT_SHIFT) +#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) +#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) +#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) + +#define 
E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE + +#define EIMS_ENABLE_MASK ( \ + E1000_EIMS_RX_QUEUE | \ + E1000_EIMS_TX_QUEUE | \ + E1000_EIMS_TCP_TIMER | \ + E1000_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F +#define E1000_RXDADV_RSSTYPE_SHIFT 12 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 +#define E1000_RXDADV_SPH 0x8000 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ +#define E1000_RXDADV_ERR_HBO 0x00800000 + +/* RSS Hash results */ +#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 +#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor */ +#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 +#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* 
NFS hdr present */ + +#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* LinkSec results */ +/* Security Processing bit Indication */ +#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on packet */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp packet */ +#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */ +#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/ +#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +/* IPSec 
Encrypt Enable for ESP */ +#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 +#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF +/* Adv ctxt IPSec ESP len mask */ +#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ +#define E1000_TXDCTL_PRIORITY 0x08000000 + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. write-back flushing */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ + +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ + +/* Additional interrupt register bit definitions */ +#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ +#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ +#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_IMM_INT (1 << 29) +#define E1000_ETQF_1588 (1 << 30) +#define E1000_ETQF_QUEUE_ENABLE (1 << 31) +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! 
+ * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + */ +#define E1000_ETQF_FILTER_EAPOL 0 + +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 +#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 8 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_LLE_SHIFT 16 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ +#define E1000_VMOLR_UPE 0x20000000 /* Unicast promisuous enable */ +#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ +#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_TCTL_EXT_COLD 0x000FFC00 +#define E1000_TCTL_EXT_COLD_SHIFT 10 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define ALL_QUEUES 0xFFFF + +/* Rx packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); +void 
e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); + +enum e1000_promisc_type { + e1000_promisc_disabled = 0, /* all promisc modes disabled */ + e1000_promisc_unicast = 1, /* unicast promiscuous enabled */ + e1000_promisc_multicast = 2, /* multicast promiscuous enabled */ + e1000_promisc_enabled = 3, /* both uni and multicast promisc */ + e1000_num_promisc_types +}; + +void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type); +u16 e1000_rxpbs_adjust_82580(u32 data); +s32 e1000_set_eee_i350(struct e1000_hw *); +#endif /* _E1000_82575_H_ */ diff --git a/lib/librte_pmd_igb/igb/e1000_api.c b/lib/librte_pmd_igb/igb/e1000_api.c new file mode 100644 index 0000000000..fc41f732c8 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_api.c @@ -0,0 +1,1152 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "e1000_api.h" + +/** + * e1000_init_mac_params - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the MAC + * set of functions. Called by drivers or by e1000_setup_init_funcs. 
+ **/ +s32 e1000_init_mac_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mac.ops.init_params) { + ret_val = hw->mac.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("MAC Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mac.init_mac_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the NVM + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_nvm_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->nvm.ops.init_params) { + ret_val = hw->nvm.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("NVM Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("nvm.init_nvm_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_phy_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->phy.ops.init_params) { + ret_val = hw->phy.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("PHY Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("phy.init_phy_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_mbx_params - Initialize mailbox function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_mbx_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mbx.ops.init_params) { + ret_val = hw->mbx.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("Mailbox Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mbx.init_mbx_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * device ID stored in the hw structure. + * MUST BE FIRST FUNCTION CALLED (explicitly or through + * e1000_setup_init_funcs()). 
+ **/ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + case E1000_DEV_ID_I350_DA4: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_82576_VF: + mac->type = e1000_vfadapt; + break; + case E1000_DEV_ID_I350_VF: + mac->type = e1000_vfadapt_i350; + break; + default: + /* Should never have loaded on this device */ + ret_val = -E1000_ERR_MAC_INIT; + break; + } + + return ret_val; +} + +/** + * e1000_setup_init_funcs - Initializes function pointers + * @hw: pointer to the HW structure + * @init_device: TRUE will initialize the rest of the function pointers + * getting the device ready for use. FALSE will only set + * MAC type and the function pointers for the other init + * functions. Passing FALSE will not generate any hardware + * reads or writes. + * + * This function must be called by a driver in order to use the rest + * of the 'shared' code files. Called by drivers only. + **/ +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) +{ + s32 ret_val; + + /* Can't do much good without knowing the MAC type. */ + ret_val = e1000_set_mac_type(hw); + if (ret_val) { + DEBUGOUT("ERROR: MAC type could not be set properly.\n"); + goto out; + } + + if (!hw->hw_addr) { + DEBUGOUT("ERROR: Registers not mapped\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Init function pointers to generic implementations. We do this first + * allowing a driver module to override it afterward. + */ + e1000_init_mac_ops_generic(hw); + e1000_init_phy_ops_generic(hw); + e1000_init_nvm_ops_generic(hw); + e1000_init_mbx_ops_generic(hw); + + /* + * Set up the init function pointers. These are functions within the + * adapter family file that sets up function pointers for the rest of + * the functions in that family. + */ + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + case e1000_82580: + case e1000_i350: + e1000_init_function_pointers_82575(hw); + break; + case e1000_vfadapt: + e1000_init_function_pointers_vf(hw); + break; + case e1000_vfadapt_i350: + e1000_init_function_pointers_vf(hw); + break; + default: + DEBUGOUT("Hardware not supported\n"); + ret_val = -E1000_ERR_CONFIG; + break; + } + + /* + * Initialize the rest of the function pointers. These require some + * register reads/writes in some cases. 
+ */ + if (!(ret_val) && init_device) { + ret_val = e1000_init_mac_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_nvm_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_phy_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_mbx_params(hw); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_bus_info - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adapter is attached and stores it in the hw structure. This is a + * function pointer entry point called by drivers. + **/ +s32 e1000_get_bus_info(struct e1000_hw *hw) +{ + if (hw->mac.ops.get_bus_info) + return hw->mac.ops.get_bus_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * This clears the VLAN filter table on the adapter. This is a function + * pointer entry point called by drivers. + **/ +void e1000_clear_vfta(struct e1000_hw *hw) +{ + if (hw->mac.ops.clear_vfta) + hw->mac.ops.clear_vfta(hw); +} + +/** + * e1000_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. This is a function pointer entry point called by drivers. + **/ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + if (hw->mac.ops.write_vfta) + hw->mac.ops.write_vfta(hw, offset, value); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, + mc_addr_count); +} + +/** + * e1000_force_mac_fc - Force MAC flow control + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Currently no func pointer exists + * and all implementations are handled in the generic version of this + * function. + **/ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + return e1000_force_mac_fc_generic(hw); +} + +/** + * e1000_check_for_link - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_for_link) + return hw->mac.ops.check_for_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_check_mng_mode - Check management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point called by drivers. 
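/*
 * Sketch: a minimal hardware bring-up sequence following the call order
 * described above, assuming the caller has already mapped the device
 * registers into hw->hw_addr and filled in hw->device_id from PCI config
 * space. The function name is hypothetical.
 */
static s32 igb_hw_bringup_sketch(struct e1000_hw *hw)
{
	s32 ret;

	/* Resolves mac.type and installs the 82575/82576/82580/i350 or VF
	 * function pointers; passing TRUE also runs the MAC/NVM/PHY/mailbox
	 * init_params callbacks. */
	ret = e1000_setup_init_funcs(hw, TRUE);
	if (ret != E1000_SUCCESS)
		return ret;

	ret = e1000_get_bus_info(hw);
	if (ret != E1000_SUCCESS)
		return ret;

	/* Reset, then re-initialize the MAC and configure link and flow
	 * control through the per-family function pointers set up above. */
	ret = e1000_reset_hw(hw);
	if (ret != E1000_SUCCESS)
		return ret;

	ret = e1000_init_hw(hw);
	if (ret != E1000_SUCCESS)
		return ret;

	return e1000_setup_link(hw);
}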
+ **/ +bool e1000_check_mng_mode(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_mng_mode) + return hw->mac.ops.check_mng_mode(hw); + + return FALSE; +} + +/** + * e1000_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + return e1000_mng_write_dhcp_info_generic(hw, buffer, length); +} + +/** + * e1000_reset_hw - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.reset_hw) + return hw->mac.ops.reset_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_init_hw - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.init_hw) + return hw->mac.ops.init_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_link - Configures link and flow control + * @hw: pointer to the HW structure + * + * This configures link and flow control settings for the adapter. This + * is a function pointer entry point called by drivers. While modules can + * also call this, they probably call their own version of this function. + **/ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_link) + return hw->mac.ops.setup_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_get_speed_and_duplex - Returns current speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to a 16-bit value to store the speed + * @duplex: pointer to a 16-bit value to store the duplex. + * + * This returns the speed and duplex of the adapter in the two 'out' + * variables passed in. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + if (hw->mac.ops.get_link_up_info) + return hw->mac.ops.get_link_up_info(hw, speed, duplex); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_led - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_led) + return hw->mac.ops.setup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores SW controllable LED + * @hw: pointer to the HW structure + * + * This restores the SW controllable LED to the value saved off by + * e1000_setup_led. This is a function pointer entry point called by drivers. + **/ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.cleanup_led) + return hw->mac.ops.cleanup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_blink_led - Blink SW controllable LED + * @hw: pointer to the HW structure + * + * This starts the adapter LED blinking. Request the LED to be setup first + * and cleaned up after. This is a function pointer entry point called by + * drivers. 
+ **/ +s32 e1000_blink_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.blink_led) + return hw->mac.ops.blink_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init - store LED configurations in SW + * @hw: pointer to the HW structure + * + * Initializes the LED config in SW. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_id_led_init(struct e1000_hw *hw) +{ + if (hw->mac.ops.id_led_init) + return hw->mac.ops.id_led_init(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_on(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_on) + return hw->mac.ops.led_on(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_off(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_off) + return hw->mac.ops.led_off(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive - Reset adaptive IFS + * @hw: pointer to the HW structure + * + * Resets the adaptive IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + e1000_reset_adaptive_generic(hw); +} + +/** + * e1000_update_adaptive - Update adaptive IFS + * @hw: pointer to the HW structure + * + * Updates adapter IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + e1000_update_adaptive_generic(hw); +} + +/** + * e1000_disable_pcie_master - Disable PCI-Express master access + * @hw: pointer to the HW structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. Currently no func pointer exists and all implementations are + * handled in the generic version of this function. + **/ +s32 e1000_disable_pcie_master(struct e1000_hw *hw) +{ + return e1000_disable_pcie_master_generic(hw); +} + +/** + * e1000_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + if (hw->mac.ops.config_collision_dist) + hw->mac.ops.config_collision_dist(hw); +} + +/** + * e1000_rar_set - Sets a receive address register + * @hw: pointer to the HW structure + * @addr: address to set the RAR to + * @index: the RAR to set + * + * Sets a Receive Address Register (RAR) to the specified address. + **/ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + if (hw->mac.ops.rar_set) + hw->mac.ops.rar_set(hw, addr, index); +} + +/** + * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state + * @hw: pointer to the HW structure + * + * Ensures that the MDI/MDIX SW state is valid. + **/ +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (hw->mac.ops.validate_mdi_setting) + return hw->mac.ops.validate_mdi_setting(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr - Determines address location in multicast table + * @hw: pointer to the HW structure + * @mc_addr: Multicast address to hash. 
+ * + * This hashes an address to determine its location in the multicast + * table. Currently no func pointer exists and all implementations + * are handled in the generic version of this function. + **/ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + return e1000_hash_mc_addr_generic(hw, mc_addr); +} + +/** + * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + return e1000_enable_tx_pkt_filtering_generic(hw); +} + +/** + * e1000_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length, + u16 offset, u8 *sum) +{ + if (hw->mac.ops.mng_host_if_write) + return hw->mac.ops.mng_host_if_write(hw, buffer, length, + offset, sum); + + return E1000_NOT_IMPLEMENTED; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + if (hw->mac.ops.mng_write_cmd_header) + return hw->mac.ops.mng_write_cmd_header(hw, hdr); + + return E1000_NOT_IMPLEMENTED; +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +s32 e1000_mng_enable_host_if(struct e1000_hw * hw) +{ + if (hw->mac.ops.mng_enable_host_if) + return hw->mac.ops.mng_enable_host_if(hw); + + return E1000_NOT_IMPLEMENTED; +} + +/** + * e1000_wait_autoneg - Waits for autonegotiation completion + * @hw: pointer to the HW structure + * + * Waits for autoneg to complete. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + if (hw->mac.ops.wait_autoneg) + return hw->mac.ops.wait_autoneg(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_check_reset_block - Verifies PHY can be reset + * @hw: pointer to the HW structure + * + * Checks if the PHY is in a state that can be reset or if manageability + * has it tied up. This is a function pointer entry point called by drivers. 
+ **/ +s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + if (hw->phy.ops.check_reset_block) + return hw->phy.ops.check_reset_block(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg - Reads PHY register + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the buffer to store the 16-bit read. + * + * Reads the PHY register and returns the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - Writes PHY register + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_release_phy - Generic release PHY + * @hw: pointer to the HW structure + * + * Return if silicon family does not require a semaphore when accessing the + * PHY. + **/ +void e1000_release_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.release) + hw->phy.ops.release(hw); +} + +/** + * e1000_acquire_phy - Generic acquire PHY + * @hw: pointer to the HW structure + * + * Return success if silicon family does not require a semaphore when + * accessing the PHY. + **/ +s32 e1000_acquire_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.acquire) + return hw->phy.ops.acquire(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg - Reads register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the location to store the 16-bit value read. + * + * Reads a register out of the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return e1000_read_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_write_kmrn_reg - Writes register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes a register to the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return e1000_write_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_get_cable_length - Retrieves cable length estimation + * @hw: pointer to the HW structure + * + * This function estimates the cable length and stores them in + * hw->phy.min_length and hw->phy.max_length. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_cable_length) + return hw->phy.ops.get_cable_length(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info - Retrieves PHY information from registers + * @hw: pointer to the HW structure + * + * This function gets some information from various PHY registers and + * populates hw->phy values with it. This is a function pointer entry + * point called by drivers. 
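/*
 * Sketch: a read-modify-write of an arbitrary PHY register through the
 * entry points above, and a hard PHY reset guarded by the reset-block
 * check. The function names are hypothetical; offset and set_bits are
 * caller-supplied, no particular PHY register is assumed.
 */
static s32 igb_phy_rmw_sketch(struct e1000_hw *hw, u32 offset, u16 set_bits)
{
	u16 val;
	s32 ret;

	ret = e1000_read_phy_reg(hw, offset, &val);
	if (ret != E1000_SUCCESS)
		return ret;

	return e1000_write_phy_reg(hw, offset, val | set_bits);
}

static s32 igb_phy_reset_sketch(struct e1000_hw *hw)
{
	/* Manageability firmware may own the PHY; only reset when the
	 * reset-block check reports it is safe to do so. */
	if (e1000_check_reset_block(hw) != E1000_SUCCESS)
		return -E1000_ERR_CONFIG;

	return e1000_phy_hw_reset(hw);
}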
+ **/ +s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_info) + return hw->phy.ops.get_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - Hard PHY reset + * @hw: pointer to the HW structure + * + * Performs a hard PHY reset. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_commit - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_phy_commit(struct e1000_hw *hw) +{ + if (hw->phy.ops.commit) + return hw->phy.ops.commit(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is TRUE, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is TRUE, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d3_lplu_state) + return hw->phy.ops.set_d3_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - Reads MAC address + * @hw: pointer to the HW structure + * + * Reads the MAC address out of the adapter and stores it in the HW structure. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_read_pba_string - Read device part number string + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. 
+ **/ +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return e1000_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * e1000_read_pba_length - Read device part number string length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) +{ + return e1000_read_pba_length_generic(hw, pba_num_size); +} + +/** + * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Validates the NVM checksum is correct. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.validate) + return hw->nvm.ops.validate(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Updates the NVM checksum. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +s32 e1000_update_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.update) + return hw->nvm.ops.update(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void e1000_reload_nvm(struct e1000_hw *hw) +{ + if (hw->nvm.ops.reload) + hw->nvm.ops.reload(hw); +} + +/** + * e1000_read_nvm - Reads NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to read + * @data: pointer to the properly sized buffer for the data. + * + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.read) + return hw->nvm.ops.read(hw, offset, words, data); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_write_nvm - Writes to NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to write + * @data: pointer to the properly sized buffer for the data. + * + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.write) + return hw->nvm.ops.write(hw, offset, words, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg - Writes 8bit Control register + * @hw: pointer to the HW structure + * @reg: 32bit register offset + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. 
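/*
 * Sketch: a typical adapter-identification path at probe time, assuming
 * the function pointers have already been installed by
 * e1000_setup_init_funcs(). The function name and the 32-byte PBA buffer
 * size are arbitrary choices for illustration.
 */
static s32 igb_read_identity_sketch(struct e1000_hw *hw)
{
	u8 pba[32];
	s32 ret;

	/* Do not trust the EEPROM contents unless the checksum is valid. */
	ret = e1000_validate_nvm_checksum(hw);
	if (ret != E1000_SUCCESS)
		return ret;

	/* Stores the permanent MAC address in the hw structure. */
	ret = e1000_read_mac_addr(hw);
	if (ret != E1000_SUCCESS)
		return ret;

	/* Part number string, e.g. for logging at driver load. */
	return e1000_read_pba_string(hw, pba, sizeof(pba));
}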
+ **/ +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data) +{ + return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); +} + +/** + * e1000_power_up_phy - Restores link in case of PHY power down + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void e1000_power_up_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_up) + hw->phy.ops.power_up(hw); + + e1000_setup_link(hw); +} + +/** + * e1000_power_down_phy - Power down PHY + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void e1000_power_down_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_down) + hw->phy.ops.power_down(hw); +} + +/** + * e1000_power_up_fiber_serdes_link - Power up serdes link + * @hw: pointer to the HW structure + * + * Power on the optics and PCS. + **/ +void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.power_up_serdes) + hw->mac.ops.power_up_serdes(hw); +} + +/** + * e1000_shutdown_fiber_serdes_link - Remove link during power down + * @hw: pointer to the HW structure + * + * Shutdown the optics and PCS on driver unload. + **/ +void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.shutdown_serdes) + hw->mac.ops.shutdown_serdes(hw); +} + diff --git a/lib/librte_pmd_igb/igb/e1000_api.h b/lib/librte_pmd_igb/igb/e1000_api.h new file mode 100644 index 0000000000..daf8642895 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_api.h @@ -0,0 +1,156 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_API_H_ +#define _E1000_API_H_ + +#include "e1000_hw.h" + +extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); +extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw); +extern void e1000_init_function_pointers_vf(struct e1000_hw *hw); +extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw); + +s32 e1000_set_mac_type(struct e1000_hw *hw); +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); +s32 e1000_init_mac_params(struct e1000_hw *hw); +s32 e1000_init_nvm_params(struct e1000_hw *hw); +s32 e1000_init_phy_params(struct e1000_hw *hw); +s32 e1000_init_mbx_params(struct e1000_hw *hw); +s32 e1000_get_bus_info(struct e1000_hw *hw); +void e1000_clear_vfta(struct e1000_hw *hw); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 e1000_force_mac_fc(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000_disable_pcie_master(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); +void e1000_update_mc_addr_list(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_check_reset_block(struct e1000_hw *hw); +s32 e1000_blink_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_id_led_init(struct e1000_hw *hw); +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +s32 e1000_get_cable_length(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); +s32 e1000_get_phy_info(struct e1000_hw *hw); +void e1000_release_phy(struct e1000_hw *hw); +s32 e1000_acquire_phy(struct e1000_hw *hw); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_commit(struct e1000_hw *hw); +void e1000_power_up_phy(struct e1000_hw *hw); +void e1000_power_down_phy(struct e1000_hw *hw); +s32 e1000_read_mac_addr(struct e1000_hw *hw); +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size); +void e1000_reload_nvm(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_wait_autoneg(struct e1000_hw *hw); +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +bool e1000_check_mng_mode(struct e1000_hw *hw); +bool 
e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if(struct e1000_hw *hw); +s32 e1000_mng_host_if_write(struct e1000_hw *hw, + u8 *buffer, u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info(struct e1000_hw * hw, + u8 *buffer, u16 length); + +/* + * TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the Rx descriptor with EOP set + * error = the 8 bit error field of the Rx descriptor with EOP set + * length = the sum of all the length fields of the Rx descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = TRUE; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = FALSE; + * } + * ... + */ + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \ + (e1000_tbi_sbp_enabled_82543(a) && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= (max_frame_size + 1))) : \ + (((length) > min_frame_size) && \ + ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1))))) + +#endif diff --git a/lib/librte_pmd_igb/igb/e1000_defines.h b/lib/librte_pmd_igb/igb/e1000_defines.h new file mode 100644 index 0000000000..a7be67ccef --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_defines.h @@ -0,0 +1,1733 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_LSCWE 0x00000010 /* Link Status wake up enable */ +#define E1000_WUC_PPROXYE 0x00000010 /* Protocol Proxy Enable */ +#define E1000_WUC_LSCWO 0x00000020 /* Link Status wake up override */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_FW_RST 0x80000000 /* Wake on FW Reset Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /*Mask for the 4 flexible filters */ +/* + * For 82576 to utilize Extended filter masks in addition to + * existing (filter) masks + */ +#define E1000_WUFC_EXT_FLX_FILTERS 0x00300000 /* Ext. 
FLX filter mask */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC +#define E1000_WUS_ARP E1000_WUFC_ARP +#define E1000_WUS_IPV4 E1000_WUFC_IPV4 +#define E1000_WUS_IPV6 E1000_WUFC_IPV6 +#define E1000_WUS_FLX0 E1000_WUFC_FLX0 +#define E1000_WUS_FLX1 E1000_WUFC_FLX1 +#define E1000_WUS_FLX2 E1000_WUFC_FLX2 +#define E1000_WUS_FLX3 E1000_WUFC_FLX3 +#define E1000_WUS_FLX_FILTERS E1000_WUFC_FLX_FILTERS + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 +/* Two Extended Flexible Filters are supported (82576) */ +#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 +#define E1000_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define E1000_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +/* Reserved (bits 4,5) in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ +/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_82580_MASK 0x01C00000 /*82580 bit 24:22*/ +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES 0x00800000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_WR_WMARK_MASK 
0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_CANC 0x04000000 /* Int delay cancellation */ +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +/* IAME enable bit (27) was removed in >= 82575 */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int acknowledge Auto-mask */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error + * detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity + * error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_REG_ADDR 0x00FF0000 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_PHY_ADDR 0x07000000 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 +#define E1000_I2CCMD_RESET 0x10000000 +#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_INTERRUPT_ENA 0x40000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, 
but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_ENABLE_MASK 0x00000007 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +/* Enable Neighbor Discovery Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 +/* Enable IP address filtering */ +#define E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ +#define E1000_MANC_MPROXYE 0x40000000 /* Mngment Proxy Enable */ +#define E1000_MANC_EN_BMC2OS 0x10000000 /* OS2BMC is enabled or not */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promisc 
enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* Rx desc min thresh size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* Rx desc min thresh size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* + * Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x01 +#define E1000_SWFW_PHY0_SM 0x02 +#define E1000_SWFW_PHY1_SM 0x04 +#define E1000_SWFW_CSR_SM 0x08 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 +#define
E1000_SWFW_SW_MNG_SM 0x400 + +/* FACTPS Definitions */ +#define E1000_FACTPS_LFS 0x40000000 /* LAN Function Select */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock + * indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through + * PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external + * LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +/* + * Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_10 0 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_LOW_LINK_LATCH 0x40 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_PCS_LCTL_AN_SGMII_BYPASS 0x80000 +#define E1000_PCS_LCTL_AN_SGMII_TRIGGER 0x100000 +#define E1000_PCS_LCTL_FAST_LINK_TIMER 0x1000000 +#define E1000_PCS_LCTL_LINK_OK_FIX 0x2000000 +#define E1000_PCS_LCTL_CRS_ON_NI 0x4000000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_10 0 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 +#define E1000_PCS_LSTS_AN_PAGE_RX 0x20000 +#define E1000_PCS_LSTS_AN_TIMED_OUT 0x40000 +#define E1000_PCS_LSTS_AN_REMOTE_FAULT 0x80000 +#define E1000_PCS_LSTS_AN_ERROR_RWS 0x100000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. + * Clear on write '0'. 
*/ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution + * disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /*PCI-X bus speed 100-133 MHz*/ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define PHY_FORCE_TIME 20 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. 
*/ +#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_FULL_DUPLEX (ADVERTISE_10_FULL | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x00000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x00002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_SHIFT 8 /* POPTS shift */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +/* Extended desc bits for 
Linksec and timesync */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define 
E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* PBA constants */ +#define E1000_PBA_6K 0x0006 /* 6KB */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_10K 0x000A /* 10KB */ +#define E1000_PBA_12K 0x000C /* 12KB */ +#define E1000_PBA_14K 0x000E /* 14KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ +#define E1000_PBA_18K 0x0012 +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_26K 0x001A +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_35K 0x0023 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB */ +#define E1000_PBA_64K 0x0040 /* 64KB */ + +#define E1000_PBS_16K E1000_PBA_16K +#define E1000_PBS_24K E1000_PBA_24K + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver + * should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW + * bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates + * an interrupt */ +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ +#define E1000_ICR_FER 
0x00400000 /* Fatal Error */ + +#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ +#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ +#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + +/* + * This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* + * This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO + * parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO + * parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer + * parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity + * error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO + * parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO + * parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define E1000_IMS_EPRST E1000_ICR_EPRST +#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ + +#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ +#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Tx desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* Rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* Rx /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO + * parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO + * parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer + * parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity + * error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO + * parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO + * parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Extended Interrupt Cause Set */ +#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +#define E1000_EITR_ITR_INT_MASK 0x0000FFFF +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of descriptors still to be processed. 
*/ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address */ +/* + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 +#define E1000_RAH_POOL_MASK 0x03FC0000 +#define E1000_RAH_POOL_SHIFT 18 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 +/* TUPLE Filtering Configuration */ +#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ +#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ +#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ +/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL SHIFT */ +#define E1000_TTQF_PROTOCOL_TCP 0x0 +/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_UDP 0x1 +/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define 
E1000_TTQF_PROTOCOL_SCTP 0x2 +#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ +#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shfit */ +#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ +#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ +#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ +#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ +#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Speed Throttle Event */ + +/* Powerville EEE defines */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Enable 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Enable 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEER Enable on Flow Control*/ +/* EEE status */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability negotiated */ +#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ +#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault 
Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T 
Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ +#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ +#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ +#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_EECD_TYPE 0x00002000 /* NVM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_NVM_GRANT_ATTEMPTS +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) + +#define E1000_NVM_SWDPIN0 0x0001 /* SWDPIN 0 NVM Value */ +#define E1000_NVM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define 
E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +#define NVM_SERDES_AMPLITUDE 0x0006 /* SERDES output amplitude */ +#define NVM_PHY_CLASS_WORD 0x0007 +#define NVM_INIT_CONTROL1_REG 0x000A +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_FLASH_VERSION 0x0032 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) (a ? (0x40 + (0x40 * a)) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed external */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 +#define NVM_WORD0F_ANE 0x0800 +#define NVM_WORD0F_SWPDIO_EXT_MASK 0x00F0 +#define NVM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
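+ * (Illustrative example, assuming the usual e1000 convention: software programs the word at NVM_CHECKSUM_REG to NVM_SUM minus the 16-bit sum of words 0x00 through 0x3E, so that the sum over words 0x00 through 0x3F comes out to 0xBABA.)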
*/ +#define NVM_SUM 0xBABA + +#define NVM_MAC_ADDR_OFFSET 0 +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_PHY_CLASS_A 0x8000 +#define NVM_SERDES_AMPLITUDE_MASK 0x000F +#define NVM_SIZE_MASK 0x1C00 +#define NVM_SIZE_SHIFT 10 +#define NVM_WORD_SIZE_BASE_SHIFT 6 +#define NVM_SWDPIO_EXT_SHIFT 4 + +/* NVM Commands - Microwire */ +#define NVM_READ_OPCODE_MICROWIRE 0x6 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_MICROWIRE 0x5 /* NVM write opcode */ +#define NVM_ERASE_OPCODE_MICROWIRE 0x7 /* NVM erase opcode */ +#define NVM_EWEN_OPCODE_MICROWIRE 0x13 /* NVM erase/write enable */ +#define NVM_EWDS_OPCODE_MICROWIRE 0x10 /* NVM erase/write disable */ + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_WRDI_OPCODE_SPI 0x04 /* NVM reset Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ +#define NVM_WRSR_OPCODE_SPI 0x01 /* NVM write Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 +#define NVM_STATUS_WEN_SPI 0x02 +#define NVM_STATUS_BP0_SPI 0x04 +#define NVM_STATUS_BP1_SPI 0x08 +#define NVM_STATUS_WPEN_SPI 0x80 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 +#define PCIE_DEVICE_CONTROL2 0x28 + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 +#define PCIX_STATUS_LO_FUNC_MASK 0x7 +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 +#define PCIE_LINK_SPEED_MASK 0x0F +#define PCIE_LINK_SPEED_2500 0x01 +#define PCIE_LINK_SPEED_5000 0x02 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
*/ +/* + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define M88E1340M_E_PHY_ID 0x01410DF0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define IGP04E1000_E_PHY_ID 0x02A80391 +#define M88_VENDOR 0x0141 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +/* 1=CLK125 low, 0=CLK125 toggling */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +/* + * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold + * 0=Normal 10BASE-T Rx Threshold + */ +#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080 +/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* + * 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ 
+#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +/* + * 1 = Lost lock detect enabled. + * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88E1111 Specific Registers */ +#define M88E1111_PHY_PAGE_SELECT1 0x16 /* for registers 0-28 */ +#define M88E1111_PHY_PAGE_SELECT2 0x1D /* for registers 30-31 */ + +/* M88E1111 page select register mask */ +#define M88E1111_PHY_PAGE_SELECT_MASK1 0xFF +#define M88E1111_PHY_PAGE_SELECT_MASK2 0x3F + +/* Intel I347AT4 Registers */ + +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* I347AT4 Extended PHY Specific Control Register */ + +/* + * Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* I347AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* M88E1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* + * Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_SPEC_STATUS \ + GG82563_REG(0, 17) /* PHY Specific Status */ +#define GG82563_PHY_INT_ENABLE \ + GG82563_REG(0, 18) /* Interrupt Enable */ +#define GG82563_PHY_SPEC_STATUS_2 \ + GG82563_REG(0, 19) /* PHY Specific Status 2 */ +#define GG82563_PHY_RX_ERR_CNTR \ + GG82563_REG(0, 21) /* Receive Error Counter */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ +#define GG82563_PHY_TEST_CLK_CTRL \ + GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL_2 \ + GG82563_REG(2, 26) /* MAC Specific Control 2 */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PORT_RESET \ + GG82563_REG(193, 17) /* Port Reset */ +#define GG82563_PHY_REVISION_ID \ + GG82563_REG(193, 18) /* Revision ID */ +#define GG82563_PHY_DEVICE_ID \ + GG82563_REG(193, 19) /* Device ID */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ +#define GG82563_PHY_RATE_ADAPT_CTRL \ + GG82563_REG(193, 25) /* Rate Adaptation Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \ + GG82563_REG(194, 16) /* FIFO's Control/Status */ +#define GG82563_PHY_KMRN_CTRL \ + GG82563_REG(194, 17) /* Control */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ +#define GG82563_PHY_KMRN_DIAGNOSTIC \ + GG82563_REG(194, 19) /* Diagnostic */ +#define GG82563_PHY_ACK_TIMEOUTS \ + GG82563_REG(194, 20) /* Acknowledge Timeouts */ +#define GG82563_PHY_ADV_ABILITY \ + GG82563_REG(194, 21) /* Advertised Ability */ +#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \ + GG82563_REG(194, 23) /* Link Partner Advertised Ability */ +#define GG82563_PHY_ADV_NEXT_PAGE \ + GG82563_REG(194, 24) /* Advertised Next Page */ +#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \ + GG82563_REG(194, 25) /* Link Partner Advertised Next page */ +#define GG82563_PHY_KMRN_MISC \ + GG82563_REG(194, 26) /* Misc. 
*/ + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +/* LinkSec register fields */ +#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECTXCAP_SUM_SHIFT 16 +#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECRXCAP_SUM_SHIFT 16 + +#define E1000_LSECTXCTRL_EN_MASK 0x00000003 +#define E1000_LSECTXCTRL_DISABLE 0x0 +#define E1000_LSECTXCTRL_AUTH 0x1 +#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define E1000_LSECTXCTRL_AISCI 0x00000020 +#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define E1000_LSECRXCTRL_EN_MASK 0x0000000C +#define E1000_LSECRXCTRL_EN_SHIFT 2 +#define E1000_LSECRXCTRL_DISABLE 0x0 +#define E1000_LSECRXCTRL_CHECK 0x1 +#define E1000_LSECRXCTRL_STRICT 0x2 +#define E1000_LSECRXCTRL_DROP 0x3 +#define E1000_LSECRXCTRL_PLSH 0x00000040 +#define E1000_LSECRXCTRL_RP 0x00000080 +#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +/* DMA Coalescing register fields */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing + * Watchdog Timer */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Rx + * Threshold */ +#define E1000_DMACR_DMACTHR_SHIFT 16 +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe + * transactions */ +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ + +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit + * Threshold */ + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate + * Threshold */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx packet rate in + * current window */ + +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Traffic + * Current Cnt */ + +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rx Threshold + * High val */ +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based + on DMA coal */ + +/* Proxy Filer Control */ +#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ +#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ +#define E1000_PROXYFC_MC 0x00000008 /* Directed Multicast + * Proxy */ +#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ +#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy + * Enable */ +#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ +#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */ +#define E1000_PROXYFC_NS 0x00000200 /* IPv4 Neighborhood + * Solicitation */ +#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy + * Enable */ +/* Proxy Status */ +#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ + +/* Firmware Status */ +#define 
E1000_FWSTS_FWRI 0x80000000 /* Firmware Reset + * Indication */ + + +#endif /* _E1000_DEFINES_H_ */ diff --git a/lib/librte_pmd_igb/igb/e1000_hw.h b/lib/librte_pmd_igb/igb/e1000_hw.h new file mode 100644 index 0000000000..bed673b5b8 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_hw.h @@ -0,0 +1,767 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I350_DA4 0x1546 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_vfadapt, + e1000_vfadapt_i350, + e1000_num_macs /* List is 1-based, so subtract 1 for TRUE count. 
*/ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_eeprom_microwire, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, + e1000_nvm_override_microwire_small, + e1000_nvm_override_microwire_large +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_vf, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } 
lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 
reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_manage.h" +#include "e1000_mbx.h" + +struct e1000_mac_operations { + /* Function pointers for the MAC. */ + s32 (*init_params)(struct e1000_hw *); + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *hw); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + void (*shutdown_serdes)(struct e1000_hw *); + void (*power_up_serdes)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*validate_mdi_setting)(struct e1000_hw *); + s32 (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*); + s32 (*mng_write_cmd_header)(struct e1000_hw *hw, + struct e1000_host_mng_command_header*); + s32 (*mng_enable_host_if)(struct e1000_hw *); + s32 (*wait_autoneg)(struct e1000_hw *); +}; + +struct e1000_phy_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); +}; + +struct e1000_nvm_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 
(*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ADDR_LEN]; + u8 perm_addr[ETH_ADDR_LEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ + #define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + enum e1000_serdes_link_state serdes_link_state; + bool serdes_has_link; + bool tx_pkt_filtering; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 *hw_addr; + u8 *flash_address; + unsigned long 
io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +#include "e1000_82575.h" + +/* These functions must be implemented by drivers */ +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); + +#endif diff --git a/lib/librte_pmd_igb/igb/e1000_mac.c b/lib/librte_pmd_igb/igb/e1000_mac.c new file mode 100644 index 0000000000..1fff57651c --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_mac.c @@ -0,0 +1,2170 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#include "e1000_api.h" + +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw); +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw); +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw); +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); + +/** + * e1000_init_mac_ops_generic - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_mac_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + DEBUGFUNC("e1000_init_mac_ops_generic"); + + /* General Setup */ + mac->ops.init_params = e1000_null_ops_generic; + mac->ops.init_hw = e1000_null_ops_generic; + mac->ops.reset_hw = e1000_null_ops_generic; + mac->ops.setup_physical_interface = e1000_null_ops_generic; + mac->ops.get_bus_info = e1000_null_ops_generic; + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie; + mac->ops.read_mac_addr = e1000_read_mac_addr_generic; + mac->ops.config_collision_dist = e1000_config_collision_dist_generic; + mac->ops.clear_hw_cntrs = e1000_null_mac_generic; + /* LED */ + mac->ops.cleanup_led = e1000_null_ops_generic; + mac->ops.setup_led = e1000_null_ops_generic; + mac->ops.blink_led = e1000_null_ops_generic; + mac->ops.led_on = e1000_null_ops_generic; + mac->ops.led_off = e1000_null_ops_generic; + /* LINK */ + mac->ops.setup_link = e1000_null_ops_generic; + mac->ops.get_link_up_info = e1000_null_link_info; + mac->ops.check_for_link = e1000_null_ops_generic; + mac->ops.wait_autoneg = e1000_wait_autoneg_generic; + /* Management */ + mac->ops.check_mng_mode = e1000_null_mng_mode; + mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic; + mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic; + mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic; + /* VLAN, MC, etc. 
*/ + mac->ops.update_mc_addr_list = e1000_null_update_mc; + mac->ops.clear_vfta = e1000_null_mac_generic; + mac->ops.write_vfta = e1000_null_write_vfta; + mac->ops.rar_set = e1000_rar_set_generic; + mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; +} + +/** + * e1000_null_ops_generic - No-op function, returns 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_ops_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_ops_generic"); + return E1000_SUCCESS; +} + +/** + * e1000_null_mac_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_mac_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_mac_generic"); + return; +} + +/** + * e1000_null_link_info - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d) +{ + DEBUGFUNC("e1000_null_link_info"); + return E1000_SUCCESS; +} + +/** + * e1000_null_mng_mode - No-op function, return FALSE + * @hw: pointer to the HW structure + **/ +bool e1000_null_mng_mode(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_mng_mode"); + return FALSE; +} + +/** + * e1000_null_update_mc - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a) +{ + DEBUGFUNC("e1000_null_update_mc"); + return; +} + +/** + * e1000_null_write_vfta - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b) +{ + DEBUGFUNC("e1000_null_write_vfta"); + return; +} + +/** + * e1000_null_rar_set - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a) +{ + DEBUGFUNC("e1000_null_rar_set"); + return; +} + +/** + * e1000_get_bus_info_pci_generic - Get PCI(x) bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCI/PCIx), and PCI(-x) function. + **/ +s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + u32 status = E1000_READ_REG(hw, E1000_STATUS); + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_get_bus_info_pci_generic"); + + /* PCI or PCI-X? */ + bus->type = (status & E1000_STATUS_PCIX_MODE) + ? e1000_bus_type_pcix + : e1000_bus_type_pci; + + /* Bus speed */ + if (bus->type == e1000_bus_type_pci) { + bus->speed = (status & E1000_STATUS_PCI66) + ? e1000_bus_speed_66 + : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + bus->speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + bus->speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + bus->speed = e1000_bus_speed_133; + break; + default: + bus->speed = e1000_bus_speed_reserved; + break; + } + } + + /* Bus width */ + bus->width = (status & E1000_STATUS_BUS64) + ? e1000_bus_width_64 + : e1000_bus_width_32; + + /* Which PCI(-X) function? */ + mac->ops.set_lan_id(hw); + + return ret_val; +} + +/** + * e1000_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. 
The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u16 pcie_link_status; + + DEBUGFUNC("e1000_get_bus_info_pcie_generic"); + + bus->type = e1000_bus_type_pci_express; + + ret_val = e1000_read_pcie_cap_reg(hw, + PCIE_LINK_STATUS, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { + case PCIE_LINK_SPEED_2500: + bus->speed = e1000_bus_speed_2500; + break; + case PCIE_LINK_SPEED_5000: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> + PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* + * The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading PCI config space. + **/ +void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u16 pci_header_type; + u32 status; + + e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type); + if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) { + status = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (status & E1000_STATUS_FUNC_MASK) + >> E1000_STATUS_FUNC_SHIFT; + } else { + bus->func = 0; + } +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + DEBUGFUNC("e1000_clear_vfta_generic"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
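+ * Each 32-bit entry in this array carries one filter bit per VLAN ID, so the + * array as a whole forms the adapter's VLAN membership bit vector.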
+ **/ +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + DEBUGFUNC("e1000_write_vfta_generic"); + + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_init_rx_addrs_generic - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the device's MAC address and clearing all the other receive + * address registers to 0. + **/ +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ADDR_LEN] = {0}; + + DEBUGFUNC("e1000_init_rx_addrs_generic"); + + /* Setup the receive address */ + DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be set up by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. + **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val = E1000_SUCCESS; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ADDR_LEN]; + + DEBUGFUNC("e1000_check_alt_mac_addr_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + goto out; + + if (!(nvm_data & NVM_COMPAT_LOM)) + goto out; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (nvm_alt_mac_addr_offset == 0xFFFF) { + /* There is no Alternate MAC Address */ + goto out; + } + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (alt_mac_addr[0] & 0x01) { + DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); + goto out; + } + + /* + * We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0.
+ */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + +out: + return ret_val; +} + +/** + * e1000_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_generic"); + + /* + * HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | + ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* + * Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + DEBUGFUNC("e1000_update_mc_addr_list_generic"); + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ADDR_LEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* + * The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. 
Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value + * @hw: pointer to the HW structure + * + * In certain situations, a system BIOS may report that the PCIx maximum + * memory read byte count (MMRBC) value is higher than the actual + * value. We check the PCIx command register against the current PCIx status + * register. + **/ +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) +{ + u16 cmd_mmrbc; + u16 pcix_cmd; + u16 pcix_stat_hi_word; + u16 stat_mmrbc; + + DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); + + /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ + if (hw->bus.type != e1000_bus_type_pcix) + return; + + e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); + cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> + PCIX_COMMAND_MMRBC_SHIFT; + stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> + PCIX_STATUS_HI_MMRBC_SHIFT; + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; + if (cmd_mmrbc > stat_mmrbc) { + pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; + pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; + e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + } +} + +/** + * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers.
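+ * These statistics registers are clear-on-read, so reading each register + * is sufficient to reset it to zero.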
+ **/ +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); + + E1000_READ_REG(hw, E1000_CRCERRS); + E1000_READ_REG(hw, E1000_SYMERRS); + E1000_READ_REG(hw, E1000_MPC); + E1000_READ_REG(hw, E1000_SCC); + E1000_READ_REG(hw, E1000_ECOL); + E1000_READ_REG(hw, E1000_MCC); + E1000_READ_REG(hw, E1000_LATECOL); + E1000_READ_REG(hw, E1000_COLC); + E1000_READ_REG(hw, E1000_DC); + E1000_READ_REG(hw, E1000_SEC); + E1000_READ_REG(hw, E1000_RLEC); + E1000_READ_REG(hw, E1000_XONRXC); + E1000_READ_REG(hw, E1000_XONTXC); + E1000_READ_REG(hw, E1000_XOFFRXC); + E1000_READ_REG(hw, E1000_XOFFTXC); + E1000_READ_REG(hw, E1000_FCRUC); + E1000_READ_REG(hw, E1000_GPRC); + E1000_READ_REG(hw, E1000_BPRC); + E1000_READ_REG(hw, E1000_MPRC); + E1000_READ_REG(hw, E1000_GPTC); + E1000_READ_REG(hw, E1000_GORCL); + E1000_READ_REG(hw, E1000_GORCH); + E1000_READ_REG(hw, E1000_GOTCL); + E1000_READ_REG(hw, E1000_GOTCH); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_RUC); + E1000_READ_REG(hw, E1000_RFC); + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RJC); + E1000_READ_REG(hw, E1000_TORL); + E1000_READ_REG(hw, E1000_TORH); + E1000_READ_REG(hw, E1000_TOTL); + E1000_READ_REG(hw, E1000_TOTH); + E1000_READ_REG(hw, E1000_TPR); + E1000_READ_REG(hw, E1000_TPT); + E1000_READ_REG(hw, E1000_MPTC); + E1000_READ_REG(hw, E1000_BPTC); +} + +/** + * e1000_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see of the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_copper_link"); + + /* + * We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* + * First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) + goto out; /* No link detected */ + + mac->get_link_status = FALSE; + + /* + * Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* + * If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + +out: + return ret_val; +} + +/** + * e1000_check_for_fiber_link_generic - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_check_for_fiber_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) && + (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + goto out; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = TRUE; + } + +out: + return ret_val; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_check_for_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* + * If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (mac->autoneg_failed == 0) { + mac->autoneg_failed = 1; + goto out; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* + * If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = TRUE; + } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { + /* + * If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = TRUE; + DEBUGOUT("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = FALSE; + DEBUGOUT("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = TRUE; + DEBUGOUT("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + mac->serdes_has_link = FALSE; + DEBUGOUT("SERDES: Link down - invalid " + "codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = FALSE; + DEBUGOUT("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = FALSE; + DEBUGOUT("SERDES: Link down - autoneg failed\n"); + } + } + +out: + return ret_val; +} + +/** + * e1000_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 e1000_setup_link_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_link_generic"); + + /* + * In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (e1000_check_reset_block(hw)) + goto out; + + /* + * If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings.
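+ * (The EEPROM default is read from word 0x0F by + * e1000_set_default_fc_generic() below.)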
+ */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + goto out; + } + + /* + * Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + goto out; + + /* + * Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + ret_val = e1000_set_fc_watermarks_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + mac->ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + goto out; + + /* + * Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* + * For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + DEBUGOUT("No signal detected\n"); + } + +out: + return ret_val; +} + +/** + * e1000_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. 
+ **/
+void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+{
+ u32 tctl;
+
+ DEBUGFUNC("e1000_config_collision_dist_generic");
+
+ tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+ tctl &= ~E1000_TCTL_COLD;
+ tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+ E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+ E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_poll_fiber_serdes_link_generic - Poll for link up
+ * @hw: pointer to the HW structure
+ *
+ * Polls for link up by reading the status register; if link fails to come
+ * up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 i, status;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
+
+ /*
+ * If we have a signal (the cable is plugged in, or assumed TRUE for
+ * serdes media) then poll for a "Link-Up" indication in the Device
+ * Status Register. Time-out if a link isn't seen in 500 milliseconds
+ * (Auto-negotiation should complete in less than 500
+ * milliseconds even if the other end is doing it in SW).
+ */
+ for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+ msec_delay(10);
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if (status & E1000_STATUS_LU)
+ break;
+ }
+ if (i == FIBER_LINK_UP_LIMIT) {
+ DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+ mac->autoneg_failed = 1;
+ /*
+ * AutoNeg failed to achieve a link, so we'll call
+ * mac->check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
+ */
+ ret_val = mac->ops.check_for_link(hw);
+ if (ret_val) {
+ DEBUGOUT("Error while checking for link\n");
+ goto out;
+ }
+ mac->autoneg_failed = 0;
+ } else {
+ mac->autoneg_failed = 0;
+ DEBUGOUT("Valid Link Found\n");
+ }
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_commit_fc_settings_generic - Configure flow control
+ * @hw: pointer to the HW structure
+ *
+ * Write the flow control settings to the Transmit Config Word Register (TXCW)
+ * based on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ u32 txcw;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_commit_fc_settings_generic");
+
+ /*
+ * Check for a software override of the flow control settings, and
+ * setup the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start auto-
+ * negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames,
+ * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we
+ * do not support receiving pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) are enabled.
+ */
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ /* Flow control completely disabled by a software over-ride. */
+ txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+ break;
+ case e1000_fc_rx_pause:
+ /*
+ * Rx Flow control is enabled and Tx Flow control is disabled
+ * by a software over-ride.
Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + break; + } + + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->txcw = txcw; + +out: + return ret_val; +} + +/** + * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + DEBUGFUNC("e1000_set_fc_watermarks_generic"); + + /* + * Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* + * We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); + E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); + + return E1000_SUCCESS; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 nvm_data; + + DEBUGFUNC("e1000_set_default_fc_generic"); + + /* + * Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + +out: + return ret_val; +} + +/** + * e1000_force_mac_fc_generic - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. 
Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings. TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC. Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_force_mac_fc_generic");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ /*
+ * Because we didn't get link via the internal auto-negotiation
+ * mechanism (we either forced link or we got link via PHY
+ * auto-neg), we have to manually enable/disable transmit and
+ * receive flow control.
+ *
+ * The "Case" statement below enables/disables flow control
+ * according to the "hw->fc.current_mode" parameter.
+ *
+ * The possible values of the "fc" parameter are:
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause
+ * frames but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames
+ * but we do not receive pause frames).
+ * 3: Both Rx and Tx flow control (symmetric) is enabled.
+ * other: No other values should be possible at this point.
+ */
+ DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+ switch (hw->fc.current_mode) {
+ case e1000_fc_none:
+ ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+ break;
+ case e1000_fc_rx_pause:
+ ctrl &= (~E1000_CTRL_TFCE);
+ ctrl |= E1000_CTRL_RFCE;
+ break;
+ case e1000_fc_tx_pause:
+ ctrl &= (~E1000_CTRL_RFCE);
+ ctrl |= E1000_CTRL_TFCE;
+ break;
+ case e1000_fc_full:
+ ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+ break;
+ default:
+ DEBUGOUT("Flow control param set incorrectly\n");
+ ret_val = -E1000_ERR_CONFIG;
+ goto out;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_config_fc_after_link_up_generic - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced. If the link needed to be forced, then
+ * flow control needs to be forced also. If auto-negotiation is enabled
+ * and did not fail, then we configure flow control based on our link
+ * partner.
+ **/
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+{
+ struct e1000_mac_info *mac = &hw->mac;
+ s32 ret_val = E1000_SUCCESS;
+ u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+ u16 speed, duplex;
+
+ DEBUGFUNC("e1000_config_fc_after_link_up_generic");
+
+ /*
+ * Check for the case where we have fiber media and auto-neg failed
+ * so we had to force link. In this case, we need to force the
+ * configuration of the MAC to match the "fc" parameter.
+ */
+ if (mac->autoneg_failed) {
+ if (hw->phy.media_type == e1000_media_type_fiber ||
+ hw->phy.media_type == e1000_media_type_internal_serdes)
+ ret_val = e1000_force_mac_fc_generic(hw);
+ } else {
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ret_val = e1000_force_mac_fc_generic(hw);
+ }
+
+ if (ret_val) {
+ DEBUGOUT("Error forcing flow control settings\n");
+ goto out;
+ }
+
+ /*
+ * Check for the case where we have copper media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner have
+ * flow control configured.
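+ * (PHY_STATUS is read twice below because the auto-negotiation-complete
+ * bit is latched; the local advertisement and the link partner ability
+ * registers are then combined according to the resolution table that
+ * follows.)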
+ */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* + * Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + goto out; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + DEBUGOUT("Copper PHY and Auto Neg " + "has not completed.\n"); + goto out; + } + + /* + * The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + goto out; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + goto out; + + /* + * Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\r\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = " + "Rx PAUSE frames only.\r\n"); + } + } + /* + * For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = Tx PAUSE frames only.\r\n"); + } + /* + * For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\r\n"); + } else { + /* + * Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\r\n"); + } + + /* + * Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + goto out; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* + * Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic"); + + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. 
+ **/ +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex) +{ + DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); + + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return E1000_SUCCESS; +} + +/** + * e1000_get_hw_semaphore_generic - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + s32 ret_val = E1000_SUCCESS; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_generic"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_put_hw_semaphore_generic - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("e1000_put_hw_semaphore_generic"); + + swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + E1000_WRITE_REG(hw, E1000_SWSM, swsm); +} + +/** + * e1000_get_auto_rd_done_generic - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) +{ + s32 i = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_get_auto_rd_done_generic"); + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) + break; + msec_delay(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + DEBUGOUT("Auto read by HW from NVM has not completed.\n"); + ret_val = -E1000_ERR_RESET; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_valid_led_default_generic - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. 
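+ *
+ * The word returned here is consumed by e1000_id_led_init_generic() below,
+ * which decodes it one nibble per LED (four LEDs in total) to decide whether
+ * each LED is driven on, driven off or left at its default in the two LEDCTL
+ * images it builds (ledctl_mode1 and ledctl_mode2).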
+ **/ +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + +out: + return ret_val; +} + +/** + * e1000_id_led_init_generic - + * @hw: pointer to the HW structure + * + **/ +s32 e1000_id_led_init_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + DEBUGFUNC("e1000_id_led_init_generic"); + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + goto out; + + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + +out: + return ret_val; +} + +/** + * e1000_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. + **/ +s32 e1000_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_setup_led_generic"); + + if (hw->mac.ops.setup_led != e1000_setup_led_generic) { + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + } + +out: + return ret_val; +} + +/** + * e1000_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. 
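+ *
+ * This is the counterpart of e1000_setup_led_generic(): LEDCTL is simply
+ * restored from mac->ledctl_default, undoing any changes made by the
+ * led_on/led_off/blink_led routines below.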
+ **/ +s32 e1000_cleanup_led_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_cleanup_led_generic"); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + return E1000_SUCCESS; +} + +/** + * e1000_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. + **/ +s32 e1000_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + DEBUGFUNC("e1000_blink_led_generic"); + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* + * set the blink bit for each LED that's "on" (0x0E) + * in ledctl_mode2 + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << + (i * 8)); + } + + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_on_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_off_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. + **/ +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + if (no_snoop) { + gcr = E1000_READ_REG(hw, E1000_GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + E1000_WRITE_REG(hw, E1000_GCR, gcr); + } +out: + return; +} + +/** + * e1000_disable_pcie_master_generic - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns E1000_SUCCESS if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. 
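+ *
+ * The GIO master disable bit is set in CTRL and the STATUS register is then
+ * polled until its GIO master enable bit clears, giving outstanding PCIe
+ * requests time to drain; on non-PCIe buses the function returns
+ * E1000_SUCCESS immediately.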
+ **/ +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_disable_pcie_master_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + goto out; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + while (timeout) { + if (!(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE)) + break; + usec_delay(100); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Master requests are pending.\n"); + ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING; + } + +out: + return ret_val; +} + +/** + * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000_reset_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_reset_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + goto out; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = FALSE; + E1000_WRITE_REG(hw, E1000_AIT, 0); +out: + return; +} + +/** + * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. + **/ +void e1000_update_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_update_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + goto out; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = TRUE; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = FALSE; + E1000_WRITE_REG(hw, E1000_AIT, 0); + } + } +out: + return; +} + +/** + * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotiation that MDI/MDIx is correctly + * set, which is forced to MDI mode only. + **/ +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_validate_mdi_setting_generic"); + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + DEBUGOUT("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. 
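+ *
+ * For example, writing data 0x1A at offset 0x03 programs the register with
+ * the value (0x03 << 8) | 0x1A = 0x031A and then polls the ready bit
+ * (E1000_GEN_CTL_READY) until the hardware reports completion.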
+ **/ +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + E1000_WRITE_REG(hw, reg, regvalue); + + /* Poll the ready bit to see if the MDI read completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + usec_delay(5); + regvalue = E1000_READ_REG(hw, reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + DEBUGOUT1("Reg %08x did not indicate ready\n", reg); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} diff --git a/lib/librte_pmd_igb/igb/e1000_mac.h b/lib/librte_pmd_igb/igb/e1000_mac.h new file mode 100644 index 0000000000..a5a98d0276 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_mac.h @@ -0,0 +1,95 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +/* + * Functions that should not be called directly from drivers but can be used + * by other files in this 'shared code' + */ +void e1000_init_mac_ops_generic(struct e1000_hw *hw); +void e1000_null_mac_generic(struct e1000_hw *hw); +s32 e1000_null_ops_generic(struct e1000_hw *hw); +s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); +bool e1000_null_mng_mode(struct e1000_hw *hw); +void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); +void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); +void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); +s32 e1000_blink_led_generic(struct e1000_hw *hw); +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw); +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000_id_led_init_generic(struct e1000_hw *hw); +s32 e1000_led_on_generic(struct e1000_hw *hw); +s32 e1000_led_off_generic(struct e1000_hw *hw); +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_setup_led_generic(struct e1000_hw *hw); +s32 e1000_setup_link_generic(struct e1000_hw *hw); +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); + +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000_config_collision_dist_generic(struct e1000_hw *hw); +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); +void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000_reset_adaptive_generic(struct e1000_hw *hw); +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); +void e1000_update_adaptive_generic(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +#endif diff --git a/lib/librte_pmd_igb/igb/e1000_manage.c b/lib/librte_pmd_igb/igb/e1000_manage.c new file mode 100644 index 0000000000..bb0a10b18d --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_manage.c @@ -0,0 +1,472 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "e1000_api.h" + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("e1000_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if_generic - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) +{ + u32 hicr; + s32 ret_val = E1000_SUCCESS; + u8 i; + + DEBUGFUNC("e1000_mng_enable_host_if_generic"); + + if (!(hw->mac.arc_subsystem_valid)) { + DEBUGOUT("ARC subsystem not valid.\n"); + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + /* Check that the host interface is enabled. 
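+ * (E1000_HICR_EN is read-only from the driver's point of view and is set
+ * by firmware; the E1000_HICR_C command bit, raised by the driver when a
+ * command is placed in RAM, is then polled below until the previous
+ * command completes or E1000_MNG_DHCP_COMMAND_TIMEOUT expires.)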
*/ + hicr = E1000_READ_REG(hw, E1000_HICR); + if ((hicr & E1000_HICR_EN) == 0) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timeout failed .\n"); + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns TRUE (>0) if + * manageability is enabled, else FALSE (0). + **/ +bool e1000_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); + + DEBUGFUNC("e1000_check_mng_mode_generic"); + + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); + + hw->mac.tx_pkt_filtering = TRUE; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = FALSE; + goto out; + } + + /* + * If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = hw->mac.ops.mng_enable_host_if(hw); + if (ret_val != E1000_SUCCESS) { + hw->mac.tx_pkt_filtering = FALSE; + goto out; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* + * If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = TRUE; + goto out; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { + hw->mac.tx_pkt_filtering = FALSE; + goto out; + } + +out: + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. 
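+ *
+ * The sequence is: enable the host interface, copy the payload into the
+ * host interface RAM just past the command header, write the command header
+ * (whose checksum is filled in by e1000_mng_write_cmd_header_generic()), and
+ * finally set E1000_HICR_C to tell the ARC firmware a new command is pending.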
+ **/ +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, + u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = hw->mac.ops.mng_enable_host_if(hw); + if (ret_val) + goto out; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + goto out; + + /* Write the manageability command header */ + ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr); + if (ret_val) + goto out; + + /* Tell the ARC a new command is pending. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + +out: + return ret_val; +} + +/** + * e1000_mng_write_cmd_header_generic - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + DEBUGFUNC("e1000_mng_write_cmd_header_generic"); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + E1000_WRITE_FLUSH(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_host_if_write_generic - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + s32 ret_val = E1000_SUCCESS; + u16 remaining, i, j, prev_bytes; + + DEBUGFUNC("e1000_mng_host_if_write_generic"); + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) { + ret_val = -E1000_ERR_PARAM; + goto out; + } + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block into the + * ram area. 
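+ * (Writes are done a dword at a time: any unaligned leading bytes were
+ * merged into the existing dword above, the loop below copies the whole
+ * dwords, and a final partial dword, zero padded, is written afterwards.
+ * Every byte copied is also added to *sum for the caller's checksum.)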
+ */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data); + } + +out: + return ret_val; +} + +/** + * e1000_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + bool ret_val = FALSE; + + DEBUGFUNC("e1000_enable_mng_pass_thru"); + + if (!hw->mac.asf_firmware_present) + goto out; + + manc = E1000_READ_REG(hw, E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + goto out; + + if (hw->mac.has_fwsm) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + factps = E1000_READ_REG(hw, E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { + ret_val = TRUE; + goto out; + } + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + ret_val = TRUE; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_host_interface_command - Writes buffer to host interface + * @hw: pointer to the HW structure + * @buffer: contains a command to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS + * else returns E1000_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, i; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_host_interface_command"); + + if (!(hw->mac.arc_subsystem_valid)) { + DEBUGOUT("Hardware doesn't support host interface command.\n"); + goto out; + } + + if (!hw->mac.asf_firmware_present) { + DEBUGOUT("Firmware is not present.\n"); + goto out; + } + + if (length == 0 || length & 0x3 || + length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if ((hicr & E1000_HICR_EN) == 0) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* + * The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < length; i++) + E1000_WRITE_REG_ARRAY_DWORD(hw, + E1000_HOST_IF, + i, + *((u32 *)buffer + i)); + + /* Setting this bit tells the ARC that a new command is pending. */ + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check command successful completion. 
*/ + if (i == E1000_HI_COMMAND_TIMEOUT || + (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { + DEBUGOUT("Command has failed with no status valid.\n"); + ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + for (i = 0; i < length; i++) + *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, + E1000_HOST_IF, + i); + +out: + return ret_val; +} + diff --git a/lib/librte_pmd_igb/igb/e1000_manage.h b/lib/librte_pmd_igb/igb/e1000_manage.h new file mode 100644 index 0000000000..9a8d756020 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_manage.h @@ -0,0 +1,90 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_MANAGE_H_ +#define _E1000_MANAGE_H_ + +bool e1000_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, + u8 *buffer, u16 length); +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw); +u8 e1000_calculate_checksum(u8 *buffer, u32 length); +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/lib/librte_pmd_igb/igb/e1000_mbx.c b/lib/librte_pmd_igb/igb/e1000_mbx.c new file mode 100644 index 0000000000..67dbc64a29 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_mbx.c @@ -0,0 +1,764 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "e1000_mbx.h" + +/** + * e1000_null_mbx_check_for_flag - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +static s32 e1000_null_mbx_check_for_flag(struct e1000_hw *hw, u16 mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_check_flag"); + + return E1000_SUCCESS; +} + +/** + * e1000_null_mbx_transact - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +static s32 e1000_null_mbx_transact(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_rw_msg"); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_mbx"); + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * e1000_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_mbx"); + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg"); + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack"); + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * 
returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst"); + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("e1000_poll_for_msg"); + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("e1000_poll_for_ack"); + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
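+ *
+ * A minimal caller-side sketch (the buffer size and mailbox id below are
+ * illustrative assumptions, not taken from the original sources):
+ *
+ *	u32 msg[E1000_VFMAILBOX_SIZE];
+ *	s32 err = e1000_read_posted_mbx(hw, msg, E1000_VFMAILBOX_SIZE, 0);
+ *
+ * On E1000_SUCCESS the buffer holds the received message; on
+ * -E1000_ERR_MBX the wait most likely timed out, in which case the
+ * polling helpers zero mbx->timeout so later posted operations fail
+ * until reset.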
+ **/ +s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_posted_mbx"); + + if (!mbx->ops.read) + goto out; + + ret_val = e1000_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_write_posted_mbx"); + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = e1000_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +/** + * e1000_init_mbx_ops_generic - Initialize mbx function pointers + * @hw: pointer to the HW structure + * + * Sets the function pointers to no-op functions + **/ +void e1000_init_mbx_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + mbx->ops.init_params = e1000_null_ops_generic; + mbx->ops.read = e1000_null_mbx_transact; + mbx->ops.write = e1000_null_mbx_transact; + mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; +} + +/** + * e1000_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. + **/ +static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) +{ + u32 v2p_mailbox = E1000_READ_REG(hw, E1000_V2PMAILBOX(0)); + + v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; + hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * e1000_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. 
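+ *
+ * The tested bits are read-to-clear in hardware; e1000_read_v2p_mailbox()
+ * caches them in hw->dev_spec.vf.v2p_mailbox so one register read does not
+ * discard other pending read-to-clear bits, and this helper then clears
+ * only the bits in 'mask' from that cached copy.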
+ **/ +static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) +{ + u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); + s32 ret_val = -E1000_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = E1000_SUCCESS; + + hw->dev_spec.vf.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * e1000_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_msg_vf(struct e1000_hw *hw, u16 mbx_id) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg_vf"); + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static s32 e1000_check_for_ack_vf(struct e1000_hw *hw, u16 mbx_id) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack_vf"); + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns TRUE if the PF has set the reset done bit or else FALSE + **/ +static s32 e1000_check_for_rst_vf(struct e1000_hw *hw, u16 mbx_id) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst_vf"); + + if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | + E1000_V2PMAILBOX_RSTI))) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_obtain_mbx_lock_vf"); + + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val; + u16 i; + + + DEBUGFUNC("e1000_write_mbx_vf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + e1000_check_for_msg_vf(hw, 0); + e1000_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(0), i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); + +out_no_write: + return ret_val; +} + +/** + * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW 
structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val = E1000_SUCCESS; + u16 i; + + DEBUGFUNC("e1000_read_mbx_vf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(0), i); + + /* Acknowledge receipt and release mailbox, then we're done */ + E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +s32 e1000_init_mbx_params_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->usec_delay = E1000_VF_MBX_INIT_DELAY; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_vf; + mbx->ops.write = e1000_write_mbx_vf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_vf; + mbx->ops.check_for_ack = e1000_check_for_ack_vf; + mbx->ops.check_for_rst = e1000_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + + return E1000_SUCCESS; +} + +static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * e1000_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst_pf"); + + if 
(vflre & (1 << vf_number)) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + + DEBUGFUNC("e1000_obtain_mbx_lock_pf"); + + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_write_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + e1000_check_for_msg_pf(hw, vf_number); + e1000_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * e1000_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
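+ *
+ * This routine is normally reached through hw->mbx.ops.read once
+ * e1000_init_mbx_params_pf() has run. A PF-side sketch (vf_number and
+ * the buffer size are illustrative assumptions):
+ *
+ *	u32 msg[E1000_VFMAILBOX_SIZE];
+ *
+ *	if (e1000_check_for_msg(hw, vf_number) == E1000_SUCCESS)
+ *		e1000_read_mbx(hw, msg, E1000_VFMAILBOX_SIZE, vf_number);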
+ **/ +static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_read_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_pf; + mbx->ops.write = e1000_write_mbx_pf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_pf; + mbx->ops.check_for_ack = e1000_check_for_ack_pf; + mbx->ops.check_for_rst = e1000_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + default: + return E1000_SUCCESS; + } +} + diff --git a/lib/librte_pmd_igb/igb/e1000_mbx.h b/lib/librte_pmd_igb/igb/e1000_mbx.h new file mode 100644 index 0000000000..6e9d5381a4 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_mbx.h @@ -0,0 +1,106 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_api.h" + +/* Define mailbox register bits */ +#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is TRUE if it is E1000_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + clear to send requests */ +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for exra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_check_for_msg(struct e1000_hw *, u16); +s32 e1000_check_for_ack(struct e1000_hw *, u16); 
+s32 e1000_check_for_rst(struct e1000_hw *, u16); +void e1000_init_mbx_ops_generic(struct e1000_hw *hw); +s32 e1000_init_mbx_params_vf(struct e1000_hw *); +s32 e1000_init_mbx_params_pf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git a/lib/librte_pmd_igb/igb/e1000_nvm.c b/lib/librte_pmd_igb/igb/e1000_nvm.c new file mode 100644 index 0000000000..1c442700bc --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_nvm.c @@ -0,0 +1,1071 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#include "e1000_api.h" + +static void e1000_stop_nvm(struct e1000_hw *hw); +static void e1000_reload_nvm_generic(struct e1000_hw *hw); + +/** + * e1000_init_nvm_ops_generic - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_nvm_ops_generic(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + DEBUGFUNC("e1000_init_nvm_ops_generic"); + + /* Initialize function pointers */ + nvm->ops.init_params = e1000_null_ops_generic; + nvm->ops.acquire = e1000_null_ops_generic; + nvm->ops.read = e1000_null_read_nvm; + nvm->ops.release = e1000_null_nvm_generic; + nvm->ops.reload = e1000_reload_nvm_generic; + nvm->ops.update = e1000_null_ops_generic; + nvm->ops.valid_led_default = e1000_null_led_default; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.write = e1000_null_write_nvm; +} + +/** + * e1000_null_nvm_read - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c) +{ + DEBUGFUNC("e1000_null_read_nvm"); + return E1000_SUCCESS; +} + +/** + * e1000_null_nvm_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_nvm_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_nvm_generic"); + return; +} + +/** + * e1000_null_led_default - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data) +{ + DEBUGFUNC("e1000_null_led_default"); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c) +{ + DEBUGFUNC("e1000_null_write_nvm"); + return E1000_SUCCESS; +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. 
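+ *
+ * Bits are clocked out most-significant first: for example, with
+ * data = 0x5 and count = 3 the DI line is driven 1, 0, 1 on successive
+ * clock cycles.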
+ **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u32 mask; + + DEBUGFUNC("e1000_shift_out_eec_bits"); + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_microwire) + eecd &= ~E1000_EECD_DO; + else + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + DEBUGFUNC("e1000_shift_in_eec_bits"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + s32 ret_val = -E1000_ERR_NVM; + + DEBUGFUNC("e1000_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = E1000_READ_REG(hw, E1000_EERD); + else + reg = E1000_READ_REG(hw, E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) { + ret_val = E1000_SUCCESS; + break; + } + + usec_delay(5); + } + + return ret_val; +} + +/** + * e1000_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). 
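+ *
+ * Callers are expected to pair a successful acquire with
+ * e1000_release_nvm_generic(), for example (illustrative only):
+ *
+ *	if (e1000_acquire_nvm_generic(hw) == E1000_SUCCESS) {
+ *		... access the EEPROM ...
+ *		e1000_release_nvm_generic(hw);
+ *	}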
+ **/ +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_acquire_nvm_generic"); + + E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); + eecd = E1000_READ_REG(hw, E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + usec_delay(5); + eecd = E1000_READ_REG(hw, E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + DEBUGOUT("Could not acquire NVM grant\n"); + ret_val = -E1000_ERR_NVM; + } + + return ret_val; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_standby_nvm"); + + if (nvm->type == e1000_nvm_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + + e1000_lower_eec_clk(hw, &eecd); + } else + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_stop_nvm"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) { + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000_release_nvm_generic - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000_release_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_release_nvm_generic"); + + e1000_stop_nvm(hw); + + eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
+ **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 ret_val = E1000_SUCCESS; + u8 spi_stat_reg; + + DEBUGFUNC("e1000_ready_nvm_eeprom"); + + if (nvm->type == e1000_nvm_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + /* Set CS */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + } else + if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + usec_delay(1); + + /* + * Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + DEBUGOUT("SPI NVM Status error\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_read_nvm_spi - Read EEPROM's using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. + **/ +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + DEBUGFUNC("e1000_read_nvm_spi"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* + * Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_read_nvm_microwire - Reads EEPROM's using microwire + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM. 
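+ *
+ * When installed as the NVM read op, this is normally reached through
+ * hw->nvm.ops.read rather than called directly, for example (the offset
+ * here is an arbitrary illustration):
+ *
+ *	u16 word;
+ *	s32 err = hw->nvm.ops.read(hw, 0, 1, &word);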
+ **/ +s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u8 read_opcode = NVM_READ_OPCODE_MICROWIRE; + + DEBUGFUNC("e1000_read_nvm_microwire"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset + i), + nvm->address_bits); + + /* + * Read the data. For microwire, each word requires the + * overhead of setup and tear-down. + */ + data[i] = e1000_shift_in_eec_bits(hw, 16); + e1000_standby_nvm(hw); + } + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_nvm_eerd"); + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_EERD, eerd); + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (E1000_READ_REG(hw, E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + +out: + return ret_val; +} + +/** + * e1000_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 widx = 0; + + DEBUGFUNC("e1000_write_nvm_spi"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + } + + msec_delay(10); +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_nvm_microwire - Writes EEPROM using microwire + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using microwire interface. + * + * If e1000_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u32 eecd; + u16 words_written = 0; + u16 widx = 0; + + DEBUGFUNC("e1000_write_nvm_microwire"); + + /* + * A check for invalid values: offset too large, too many words, + * and not enough words. 
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + + e1000_standby_nvm(hw); + + while (words_written < words) { + e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE, + nvm->opcode_bits); + + e1000_shift_out_eec_bits(hw, (u16)(offset + words_written), + nvm->address_bits); + + e1000_shift_out_eec_bits(hw, data[words_written], 16); + + e1000_standby_nvm(hw); + + for (widx = 0; widx < 200; widx++) { + eecd = E1000_READ_REG(hw, E1000_EECD); + if (eecd & E1000_EECD_DO) + break; + usec_delay(50); + } + + if (widx == 200) { + DEBUGOUT("NVM Write did not complete\n"); + ret_val = -E1000_ERR_NVM; + goto release; + } + + e1000_standby_nvm(hw); + + words_written++; + } + + e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE, + (u16)(nvm->opcode_bits + 2)); + + e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2)); + +release: + nvm->ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("e1000_read_pba_string_generic"); + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + /* + * if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + DEBUGOUT("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + goto out; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length 
== 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + ret_val = E1000_ERR_NO_SPACE; + goto out; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + +out: + return ret_val; +} + +/** + * e1000_read_pba_length_generic - Read device part number length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size. + **/ +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 length; + + DEBUGFUNC("e1000_read_pba_length_generic"); + + if (pba_num_size == NULL) { + DEBUGOUT("PBA buffer size was null\n"); + ret_val = E1000_ERR_INVALID_ARGUMENT; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + /* if data is not ptr guard the PBA must be in legacy format */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + *pba_num_size = 11; + goto out; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + ret_val = E1000_ERR_NVM_PBA_SECTION; + goto out; + } + + /* + * Convert from length in u16 values to u8 chars, add 1 for NULL, + * and subtract 2 because length field is included in length. + */ + *pba_num_size = ((u32)length * 2) - 1; + +out: + return ret_val; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = E1000_READ_REG(hw, E1000_RAH(0)); + rar_low = E1000_READ_REG(hw, E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. 
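+ *
+ * In other words, the words at offsets 0 .. NVM_CHECKSUM_REG are summed
+ * with 16-bit wrap-around and the total must equal NVM_SUM (0xBABA).
+ * e1000_update_nvm_checksum_generic() produces that property by writing
+ * NVM_SUM minus the sum of the preceding words into the checksum word;
+ * e.g. if the words before the checksum sum to 0x1234, the checksum word
+ * is written as 0xBABA - 0x1234 = 0xA886.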
+ **/ +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_generic"); + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum"); + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * e1000_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +static void e1000_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + DEBUGFUNC("e1000_reload_nvm_generic"); + + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); +} + diff --git a/lib/librte_pmd_igb/igb/e1000_nvm.h b/lib/librte_pmd_igb/igb/e1000_nvm.h new file mode 100644 index 0000000000..6bba6417b5 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_nvm.h @@ -0,0 +1,66 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_NVM_H_ +#define _E1000_NVM_H_ + +void e1000_init_nvm_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +void e1000_null_nvm_generic(struct e1000_hw *hw); +s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); +s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); + +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000_release_nvm_generic(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 + +#endif diff --git a/lib/librte_pmd_igb/igb/e1000_osdep.c b/lib/librte_pmd_igb/igb/e1000_osdep.c new file mode 100644 index 0000000000..203dcc8a3a --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_osdep.c @@ -0,0 +1,72 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "e1000_api.h" + +/* + * NOTE: the following routines using the e1000 + * naming style are provided to the shared + * code but are OS specific + */ + +void +e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + return; +} + +void +e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + *value = 0; + return; +} + +/* + * Read the PCI Express capabilities + */ +int32_t +e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + return E1000_NOT_IMPLEMENTED; +} + +/* + * Write the PCI Express capabilities + */ +int32_t +e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) +{ + return E1000_NOT_IMPLEMENTED; +} diff --git a/lib/librte_pmd_igb/igb/e1000_osdep.h b/lib/librte_pmd_igb/igb/e1000_osdep.h new file mode 100644 index 0000000000..cf460d5d47 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_osdep.h @@ -0,0 +1,128 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "../e1000_logs.h" + +/* Remove some compiler warnings for the files in this dir */ +#ifdef __INTEL_COMPILER +#pragma warning(disable:2259) /* conversion may lose significant bits */ +#pragma warning(disable:869) /* Parameter was never referenced */ +#pragma warning(disable:181) /* Arg incompatible with format string */ +#else +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wformat" +#pragma GCC diagnostic ignored "-Wuninitialized" +#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 7)) +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif +#endif + +#define DELAY(x) rte_delay_us(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000*(x)) +#define msec_delay_irq(x) DELAY(1000*(x)) + +#define DEBUGFUNC(F) DEBUGOUT(F); +#define DEBUGOUT(S, args...) PMD_DRV_LOG(DEBUG, S, ##args) +#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args) + +#define FALSE 0 +#define TRUE 1 + +typedef uint64_t u64; +typedef uint32_t u32; +typedef uint16_t u16; +typedef uint8_t u8; +typedef int64_t s64; +typedef int32_t s32; +typedef int16_t s16; +typedef int8_t s8; +typedef int bool; + +#define __le16 u16 +#define __le32 u32 +#define __le64 u64 + +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) + +#define E1000_PCI_REG(reg) (*((volatile uint32_t *)(reg))) + +#define E1000_PCI_REG_WRITE(reg, value) do { \ + E1000_PCI_REG((reg)) = (value); \ +} while (0) + +#define E1000_PCI_REG_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg))) + +#define E1000_PCI_REG_ARRAY_ADDR(hw, reg, index) \ + E1000_PCI_REG_ADDR((hw), (reg) + ((index) << 2)) + +static inline uint32_t e1000_read_addr(volatile void* addr) +{ + return E1000_PCI_REG(addr); +} + +/* Register READ/WRITE macros */ + +#define E1000_READ_REG(hw, reg) \ + e1000_read_addr(E1000_PCI_REG_ADDR((hw), (reg))) + +#define E1000_WRITE_REG(hw, reg, value) \ + E1000_PCI_REG_WRITE(E1000_PCI_REG_ADDR((hw), (reg)), (value)) + +#define E1000_READ_REG_ARRAY(hw, reg, index) \ + E1000_PCI_REG(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index))) + +#define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \ + E1000_PCI_REG_WRITE(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value)) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#endif /* _E1000_OSDEP_H_ */ diff --git a/lib/librte_pmd_igb/igb/e1000_phy.c b/lib/librte_pmd_igb/igb/e1000_phy.c new file mode 100644 index 0000000000..aede670357 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_phy.c @@ -0,0 +1,2988 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "e1000_api.h" + +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw); +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * e1000_init_phy_ops_generic - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_phy_ops_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + DEBUGFUNC("e1000_init_phy_ops_generic"); + + /* Initialize function pointers */ + phy->ops.init_params = e1000_null_ops_generic; + phy->ops.acquire = e1000_null_ops_generic; + phy->ops.check_polarity = e1000_null_ops_generic; + phy->ops.check_reset_block = e1000_null_ops_generic; + phy->ops.commit = e1000_null_ops_generic; + phy->ops.force_speed_duplex = e1000_null_ops_generic; + phy->ops.get_cfg_done = e1000_null_ops_generic; + phy->ops.get_cable_length = e1000_null_ops_generic; + phy->ops.get_info = e1000_null_ops_generic; + phy->ops.read_reg = e1000_null_read_reg; + phy->ops.read_reg_locked = e1000_null_read_reg; + phy->ops.release = e1000_null_phy_generic; + phy->ops.reset = e1000_null_ops_generic; + phy->ops.set_d0_lplu_state = e1000_null_lplu_state; + phy->ops.set_d3_lplu_state = e1000_null_lplu_state; + phy->ops.write_reg = e1000_null_write_reg; + phy->ops.write_reg_locked = 
e1000_null_write_reg; + phy->ops.power_up = e1000_null_phy_generic; + phy->ops.power_down = e1000_null_phy_generic; +} + +/** + * e1000_null_read_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + DEBUGFUNC("e1000_null_read_reg"); + return E1000_SUCCESS; +} + +/** + * e1000_null_phy_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_phy_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_null_phy_generic"); + return; +} + +/** + * e1000_null_lplu_state - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active) +{ + DEBUGFUNC("e1000_null_lplu_state"); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + DEBUGFUNC("e1000_null_write_reg"); + return E1000_SUCCESS; +} + +/** + * e1000_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + DEBUGFUNC("e1000_check_reset_block"); + + manc = E1000_READ_REG(hw, E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +/** + * e1000_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + + DEBUGFUNC("e1000_get_phy_id"); + + if (!(phy->ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + goto out; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + goto out; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + +out: + return ret_val; +} + +/** + * e1000_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_phy_reset_dsp_generic"); + + if (!(hw->phy.ops.write_reg)) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + goto out; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); + +out: + return ret_val; +} + +/** + * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. 
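+ * In practice the read is a single write to E1000_MDIC carrying the
+ * register offset, the PHY address and the READ opcode, followed by a poll
+ * of the READY bit; once READY is set, the low 16 bits of MDIC hold the
+ * returned data.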
+ **/ +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Read did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + *data = (u16) mdic; + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* + * Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* + * Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Write did not complete\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + DEBUGFUNC("e1000_read_phy_reg_i2c"); + + /* + * Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
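+ * Unlike the MDIC path, the 16-bit value read back through I2CCMD has its
+ * bytes swapped, so it is swapped once more below before being returned.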
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + DEBUGFUNC("e1000_write_phy_reg_i2c"); + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* + * Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_phy_reg_m88"); + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
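+ * Like the m88 read path, this is a thin wrapper: acquire the semaphore,
+ * forward to e1000_write_phy_reg_mdic() with the offset masked to
+ * MAX_PHY_REG_ADDRESS, then release.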
+ **/ +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_m88"); + + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * __e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_read_phy_reg_igp"); + + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) + goto release; + } + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + if (!locked) + hw->phy.ops.release(hw); +out: + return ret_val; +} + +/** + * e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, FALSE); +} + +/** + * e1000_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, TRUE); +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
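+ * Offsets above MAX_PHY_MULTI_PAGE_REG live on another register page, so
+ * the offset is first written to IGP01E1000_PHY_PAGE_SELECT before the
+ * masked MDIC write is issued.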
+ **/ +static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_igp"); + + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (ret_val) + goto release; + } + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + if (!locked) + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, FALSE); +} + +/** + * e1000_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, TRUE); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_read_kmrn_reg"); + + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_read_kmrn_reg_generic - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, FALSE); +} + +/** + * e1000_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. 
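+ * Kumeran accesses go through E1000_KMRNCTRLSTA: the offset is shifted into
+ * the OFFSET field, the REN bit requests a read, and the data is available
+ * in the low 16 bits of the register after a short delay.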
+ **/ +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, TRUE); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. + **/ +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_kmrn_reg_generic"); + + if (!locked) { + if (!(hw->phy.ops.acquire)) + goto out; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + + usec_delay(2); + + if (!locked) + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_kmrn_reg_generic - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, FALSE); +} + +/** + * e1000_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, TRUE); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_82577"); + + if (hw->phy.reset_disable) { + ret_val = E1000_SUCCESS; + goto out; + } + + if (hw->phy.type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + } + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + goto out; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + +out: + return ret_val; +} + +/** + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88"); + + if (phy->reset_disable) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* Enable CRS on Tx. 
This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + if (phy->revision < E1000_REVISION_4) { + /* + * Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + goto out; + } + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); + + if (phy->reset_disable) { + ret_val = E1000_SUCCESS; + goto out; + } + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* + * Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* + * Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_igp"); + + if (phy->reset_disable) { + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + goto out; + } + + /* + * Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msec_delay(100); + + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, FALSE); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + goto out; + } + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + goto out; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* + * when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + goto out; + + /* load defaults for future use */ + phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ? + ((data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : + e1000_ms_auto; + + switch (phy->ms_type) { + case e1000_ms_force_master: + data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + data |= CR_1000T_MS_ENABLE; + data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* + * Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* + * If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (phy->autoneg_advertised == 0) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + goto out; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* + * Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + /* + * Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = hw->mac.ops.wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for " + "autoneg to complete\n"); + goto out; + } + } + + hw->mac.get_link_status = TRUE; + +out: + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. 
Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + DEBUGFUNC("e1000_phy_setup_autoneg"); + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + goto out; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + + /* + * Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* + * Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. 
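+ *
+ * In terms of the advertisement register, e1000_fc_none clears both
+ * NWAY_AR_PAUSE and NWAY_AR_ASM_DIR, e1000_fc_rx_pause and e1000_fc_full
+ * set both, and e1000_fc_tx_pause sets only NWAY_AR_ASM_DIR, as handled
+ * by the switch below.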
+ */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* + * Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* + * Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* + * Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + goto out; + + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + ret_val = phy->ops.write_reg(hw, + PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_setup_copper_link_generic"); + + if (hw->mac.autoneg) { + /* + * Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + goto out; + } else { + /* + * PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + goto out; + } + } + + /* + * Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000_phy_has_link_generic(hw, + COPPER_LINK_UP_LIMIT, + 10, + &link); + if (ret_val) + goto out; + + if (link) { + DEBUGOUT("Valid link established!!!\n"); + e1000_config_collision_dist_generic(hw); + ret_val = e1000_config_fc_after_link_up_generic(hw); + } else { + DEBUGOUT("Unable to establish link!!!\n"); + } + +out: + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). 
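+ * When autoneg_wait_to_complete is set, link is polled for up to
+ * PHY_FORCE_LIMIT iterations at 100000 usec intervals, and the poll is
+ * repeated once more if the first attempt does not see link.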
+ **/ +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_igp"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* + * Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + goto out; + + DEBUGOUT1("IGP PSCR: %X\n", phy_data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_m88"); + + /* + * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + /* Reset the phy to commit changes. */ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + goto out; + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + + if (!link) { + if (hw->phy.type != e1000_phy_m88 || + hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) { + DEBUGOUT("Link taking longer than expected.\n"); + } else { + /* + * We didn't get link. + * Reset the DSP and cross our fingers. 
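+ * The reset below selects PHY page 0x001d and then toggles
+ * M88E1000_PHY_GEN_CONTROL through e1000_phy_reset_dsp_generic().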
+ */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + goto out; + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + goto out; + } + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + goto out; + } + + if (hw->phy.type != e1000_phy_m88 || + hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + /* + * Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + goto out; + + /* + * In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + +out: + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_ife"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data); + if (ret_val) + goto out; + + /* Disable MDI-X support for 10/100 */ + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + goto out; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + goto out; + + DEBUGOUT1("IFE PMC: %X\n", data); + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of PHY_CONTROL + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the PHY_CONTROL register for these settings to + * take affect. 
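+ * Only the MAC CTRL register is written here; the updated PHY control
+ * value is handed back through *phy_ctrl for the caller to commit.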
+ **/ +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + DEBUGFUNC("e1000_phy_force_speed_duplex_setup"); + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~MII_CR_AUTO_NEG_EN; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~MII_CR_FULL_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= MII_CR_FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? */ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= MII_CR_SPEED_100; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + DEBUGOUT("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl |= MII_CR_SPEED_10; + *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + DEBUGOUT("Forcing 10mb\n"); + } + + e1000_config_collision_dist_generic(hw); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); +} + +/** + * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is TRUE, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d3_lplu_state_generic"); + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + } + +out: + return ret_val; +} + +/** + * e1000_check_downshift_generic - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. + **/ +s32 e1000_check_downshift_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_downshift_generic"); + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = FALSE; + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = (phy_data & mask) ? TRUE : FALSE; + +out: + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_check_polarity_m88"); + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_igp"); + + /* + * Polarity is determined based on the speed of + * our connection. 
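+ * At 1000 Mb/s the polarity bit is taken from the PCS init register; at
+ * 10 Mb/s it comes from the port status register, and 100 Mb/s has no
+ * polarity indication (the bit always reads as 0).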
+ */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* + * This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = phy->ops.read_reg(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = (data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + +out: + return ret_val; +} + +/** + * e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reversal feature being enabled. + **/ +s32 e1000_check_polarity_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + DEBUGFUNC("e1000_check_polarity_ife"); + + /* + * Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = phy->ops.read_reg(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = (phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_wait_autoneg_generic - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. + **/ +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("e1000_wait_autoneg_generic"); + + if (!(hw->phy.ops.read_reg)) + return E1000_SUCCESS; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_AUTONEG_COMPLETE) + break; + msec_delay(100); + } + + /* + * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * e1000_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = E1000_SUCCESS; + u16 i, phy_status; + + DEBUGFUNC("e1000_phy_has_link_generic"); + + if (!(hw->phy.ops.read_reg)) + return E1000_SUCCESS; + + for (i = 0; i < iterations; i++) { + /* + * Some PHYs require the PHY_STATUS register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + /* + * If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. 
+ */ + usec_delay(usec_interval); + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); + if (ret_val) + break; + if (phy_status & MII_SR_LINK_STATUS) + break; + if (usec_interval >= 1000) + msec_delay_irq(usec_interval/1000); + else + usec_delay(usec_interval); + } + + *success = (i < iterations) ? TRUE : FALSE; + + return ret_val; +} + +/** + * e1000_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has five + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + DEBUGFUNC("e1000_get_cable_length_m88"); + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_m88_gen2 - Determine cable length for m88 gen2 PHY + * @hw: pointer to the HW structure + * + * Reads the cable diagnostic registers of the I347AT4/M88E1340M PHYs, or the + * VCT DSP distance register of the M88E1112 PHY, and populates the minimum, + * maximum and average cable length fields. + **/ +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, phy_data2, index, default_page, is_cm; + + DEBUGFUNC("e1000_get_cable_length_m88_gen2"); + + switch (hw->phy.id) { + case M88E1340M_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + goto out; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + goto out; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + goto out; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ?
100 : 1); + + /* Reset the page selec to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + break; + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + goto out; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + goto out; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + goto out; + + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + DEBUGFUNC("e1000_get_cable_length_igp_2"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + goto out; + + /* + * Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK; + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? 
+ (agc_value - IGP02E1000_AGC_RANGE) : 0; + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + +out: + return ret_val; +} + +/** + * e1000_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_m88"); + + if (phy->media_type != e1000_media_type_copper) { + DEBUGOUT("Phy info is only valid for copper media\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + goto out; + + phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) + ? TRUE : FALSE; + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? TRUE : FALSE; + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + goto out; + + phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * e1000_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_igp"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = TRUE; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? 
TRUE : FALSE; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * e1000_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. + **/ +s32 e1000_get_phy_info_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_ife"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + goto out; + phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) + ? FALSE : TRUE; + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife(hw); + if (ret_val) + goto out; + } else { + /* Polarity is forced */ + phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + } + + ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE; + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + +out: + return ret_val; +} + +/** + * e1000_phy_sw_reset_generic - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. + **/ +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + u16 phy_ctrl; + + DEBUGFUNC("e1000_phy_sw_reset_generic"); + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + goto out; + + phy_ctrl |= MII_CR_RESET; + ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + goto out; + + usec_delay(1); + +out: + return ret_val; +} + +/** + * e1000_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). 
+ **/ +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u32 ctrl; + + DEBUGFUNC("e1000_phy_hw_reset_generic"); + + ret_val = phy->ops.check_reset_block(hw); + if (ret_val) { + ret_val = E1000_SUCCESS; + goto out; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + goto out; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(hw); + + usec_delay(phy->reset_delay_us); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + + usec_delay(150); + + phy->ops.release(hw); + + ret_val = phy->ops.get_cfg_done(hw); + +out: + return ret_val; +} + +/** + * e1000_get_cfg_done_generic - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milli-seconds for configuration to complete + * and return success. + **/ +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_get_cfg_done_generic"); + + msec_delay_irq(10); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. + **/ +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) +{ + DEBUGOUT("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Gig mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable 
integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* + * Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* + * Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. + **/ +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id) +{ + enum e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + case I347AT4_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1340M_E_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + case I82580_I_PHY_ID: + phy_type = e1000_phy_82580; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return phy_type; +} + +/** + * e1000_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + **/ +s32 e1000_determine_phy_address(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_PHY_TYPE; + u32 phy_addr = 0; + u32 i; + enum e1000_phy_type phy_type = e1000_phy_unknown; + + hw->phy.id = phy_type; + + for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { + hw->phy.addr = phy_addr; + i = 0; + + do { + e1000_get_phy_id(hw); + phy_type = e1000_get_phy_type_from_id(hw->phy.id); + + /* + * If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) { + ret_val = E1000_SUCCESS; + goto out; + } + msec_delay(1); + i++; + } while (i < 10); + } + +out: + return ret_val; +} + +/** + * e1000_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + **/ +void e1000_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); +} + +/** + * e1000_power_down_phy_copper - Power down copper PHY + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, to turn off link during a + * driver unload, or when wake on lan is not enabled, power down the PHY + * rather than restoring the previous link settings.
+ **/ +void e1000_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg); + msec_delay(1); +} + +/** + * e1000_check_polarity_82577 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_check_polarity_82577"); + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal; + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. + **/ +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + DEBUGFUNC("e1000_phy_force_speed_duplex_82577"); + + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); + if (ret_val) + goto out; + + e1000_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); + if (ret_val) + goto out; + + usec_delay(1); + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n"); + + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + + if (!link) + DEBUGOUT("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, + PHY_FORCE_LIMIT, + 100000, + &link); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + DEBUGFUNC("e1000_get_phy_info_82577"); + + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + goto out; + + if (!link) { + DEBUGOUT("Phy info is only valid if link is up\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + phy->polarity_correction = TRUE; + + ret_val = e1000_check_polarity_82577(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) + goto out; + + phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? TRUE : FALSE; + + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); + if (ret_val) + goto out; + + phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS) + ? e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS) + ? 
e1000_1000t_rx_status_ok + : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + +out: + return ret_val; +} + +/** + * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. + **/ +s32 e1000_get_cable_length_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + DEBUGFUNC("e1000_get_cable_length_82577"); + + ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + goto out; + + length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT; + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + ret_val = -E1000_ERR_PHY; + + phy->cable_length = length; + +out: + return ret_val; +} diff --git a/lib/librte_pmd_igb/igb/e1000_phy.h b/lib/librte_pmd_igb/igb/e1000_phy.h new file mode 100644 index 0000000000..1b21430d24 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_phy.h @@ -0,0 +1,217 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +void e1000_init_phy_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); +void e1000_null_phy_generic(struct e1000_hw *hw); +s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_check_downshift_generic(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000_get_phy_id(struct e1000_hw *hw); +s32 e1000_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); +s32 e1000_wait_autoneg_generic(struct e1000_hw *hw); +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); +s32 e1000_determine_phy_address(struct e1000_hw *hw); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 
e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 4 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 +#define I82577_PHY_STATUS2_SPEED_100MBPS 0x0100 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_AUTO_MDIX 0x0400 +#define I82577_PHY_CTRL2_FORCE_MDI_MDIX 0x0200 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* 82580 PHY Power Management */ +#define E1000_82580_PHY_POWER_MGMT 0xE14 +#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */ +#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */ +#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +/* Enable flexible speed on link-up */ +#define IGP01E1000_GMII_FLEX_SPD 0x0010 +#define IGP01E1000_GMII_SPD 0x0020 /* Enable SPD */ + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course - 15:13, Fine - 12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define IGP03E1000_PHY_MISC_CTRL 0x1B +#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Manually Set Duplex */ + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define 
E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Control */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */ + +#endif diff --git a/lib/librte_pmd_igb/igb/e1000_regs.h b/lib/librte_pmd_igb/igb/e1000_regs.h new file mode 100644 index 0000000000..6b902eae25 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_regs.h @@ -0,0 +1,574 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ +#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ +#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ +#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ +#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXT 0x0002C /* Future Extended - RW */ +#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* Tx Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ +#define E1000_ICR_V2 0x01500 /* Interrupt Cause - new location - RC */ +#define E1000_ICS_V2 0x01504 /* Interrupt Cause Set - new location - WO */ +#define E1000_IMS_V2 0x01508 /* Interrupt Mask Set/Read - new location - RW */ +#define E1000_IMC_V2 0x0150C /* Interrupt Mask Clear - new location - WO */ +#define E1000_IAM_V2 0x01510 /* Interrupt Ack Auto Mask - new location - RW */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFPCQ(_n) (0x02430 + (0x4 * (_n))) +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define E1000_RDPURD 0x025D8 /* DMA Rx Descriptor uC Data Read - RW */ +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ +#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer adapters - RW */ +#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +/* + * Convenience macros + * + * Note: "_n" is the queue 
number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ + (0x0C030 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ + (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ + (0x0E03C + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* Tx DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_PBSLAC 0x03100 /* Packet Buffer Slave Access Control */ +#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Packet Buffer DWORD (_n) */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ +#define E1000_ITPBS 0x03404 /* Same as TXPBS, renamed for newer adpaters - RW */ +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDPUMB 0x0357C /* DMA Tx Descriptor uC Mail Box - RW */ +#define E1000_TDPUAD 0x03580 /* DMA Tx Descriptor uC Addr Command - RW */ +#define E1000_TDPUWD 0x03584 /* DMA Tx Descriptor uC Data Write - RW */ +#define E1000_TDPURD 0x03588 /* DMA Tx Descriptor uC Data Read - RW */ +#define E1000_TDPUCTL 0x0358C /* DMA Tx Descriptor uC Control - RW */ +#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ +#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ +#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ +#define E1000_DTXMXSZRQ 0x03540 /* DMA Tx Max Total Allow Size Requests - RW */ +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 
0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ + +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 +/* Virtualization statistical counters */ +#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) +#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) +#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) +#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) +#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) +#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) +#define 
E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) +#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) +#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) + +#define E1000_LSECTXUT 0x04300 /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */ +#define E1000_LSECTXPKTE 0x04304 /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */ +#define E1000_LSECTXPKTP 0x04308 /* LinkSec Protected Tx Packet Count - OutPktsProtected */ +#define E1000_LSECTXOCTE 0x0430C /* LinkSec Encrypted Tx Octets Count - OutOctetsEncrypted */ +#define E1000_LSECTXOCTP 0x04310 /* LinkSec Protected Tx Octets Count - OutOctetsProtected */ +#define E1000_LSECRXUT 0x04314 /* LinkSec Untagged non-Strict Rx Packet Count - InPktsUntagged/InPktsNoTag */ +#define E1000_LSECRXOCTD 0x0431C /* LinkSec Rx Octets Decrypted Count - InOctetsDecrypted */ +#define E1000_LSECRXOCTV 0x04320 /* LinkSec Rx Octets Validated - InOctetsValidated */ +#define E1000_LSECRXBAD 0x04324 /* LinkSec Rx Bad Tag - InPktsBadTag */ +#define E1000_LSECRXNOSCI 0x04328 /* LinkSec Rx Packet No SCI Count - InPktsNoSci */ +#define E1000_LSECRXUNSCI 0x0432C /* LinkSec Rx Packet Unknown SCI Count - InPktsUnknownSci */ +#define E1000_LSECRXUNCH 0x04330 /* LinkSec Rx Unchecked Packets Count - InPktsUnchecked */ +#define E1000_LSECRXDELAY 0x04340 /* LinkSec Rx Delayed Packet Count - InPktsDelayed */ +#define E1000_LSECRXLATE 0x04350 /* LinkSec Rx Late Packets Count - InPktsLate */ +#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* LinkSec Rx Packet OK Count - InPktsOk */ +#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* LinkSec Rx Invalid Count - InPktsInvalid */ +#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* LinkSec Rx Not Valid Count - InPktsNotValid */ +#define E1000_LSECRXUNSA 0x043C0 /* LinkSec Rx Unused SA Count - InPktsUnusedSa */ +#define E1000_LSECRXNUSA 0x043D0 /* LinkSec Rx Not Using SA Count - InPktsNotUsingSa */ +#define E1000_LSECTXCAP 0x0B000 /* LinkSec Tx Capabilities Register - RO */ +#define E1000_LSECRXCAP 0x0B300 /* LinkSec Rx Capabilities Register - RO */ +#define E1000_LSECTXCTRL 0x0B004 /* LinkSec Tx Control - RW */ +#define E1000_LSECRXCTRL 0x0B304 /* LinkSec Rx Control - RW */ +#define E1000_LSECTXSCL 0x0B008 /* LinkSec Tx SCI Low - RW */ +#define E1000_LSECTXSCH 0x0B00C /* LinkSec Tx SCI High - RW */ +#define E1000_LSECTXSA 0x0B010 /* LinkSec Tx SA0 - RW */ +#define E1000_LSECTXPN0 0x0B018 /* LinkSec Tx SA PN 0 - RW */ +#define E1000_LSECTXPN1 0x0B01C /* LinkSec Tx SA PN 1 - RW */ +#define E1000_LSECRXSCL 0x0B3D0 /* LinkSec Rx SCI Low - RW */ +#define E1000_LSECRXSCH 0x0B3E0 /* LinkSec Rx SCI High - RW */ +#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 0 - WO */ +#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */ +#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */ +#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */ +/* + * LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit + * key - RW. 
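+ * (Illustrative example derived from the macro below: E1000_LSECRXKEY(1, 2) + * decodes to 0x0B350 + (0x10 * 1) + (0x04 * 2) = 0x0B368.)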
+ */ +#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +#define E1000_SSVPC 0x041A0 /* Switch Security Violation Packet Count */ +#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ +#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ +#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ +#define E1000_IPSRXIPADDR(_n) (0x0B420+ (0x04 * (_n))) /* IPSec Rx IPv4/v6 Address - RW */ +#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */ +#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ +#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ +#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) /* IPSec Tx 128-bit Key - RW */ +#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ +#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Page - RW */ +#define E1000_1GSTAT_RCV 0x04228 /* 1GSTAT Code Violation Packet Count - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - 
Read/Write 1's to clear */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ +#define E1000_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flexible Host Filter Table */ +#define E1000_FHFT_EXT(_n) (0x09A00 + (_n * 0x100)) /* Ext Flexible Host Filter Table */ + + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) /* Mngmt Decision Filters */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_SWSM2 0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ +#define E1000_FWSTS 0x08F0C /* FW Status */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate Interrupt Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register + * (_i) - RW */ +#define E1000_MSIXTADD(_i) (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr + * low reg - RW */ +#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr + * upper reg - RW */ +#define E1000_MSIXTMSG(_i) (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry + * message reg - RW */ +#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry + * vector ctrl reg - RW */ +#define E1000_MSIXPBA 0x0E000 /* MSI-X Pending bit array */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* VT Registers */ +#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue 
Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ +#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ +#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ +#define E1000_MDFB 0x03558 /* Malicious Driver free block */ +#define E1000_LVMMC 0x03548 /* Last VM Misbehavior cause */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ +#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ +#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine + * Filter - RW */ +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ +/* Time Sync */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ +#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ +#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ +#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ +#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ +#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet Plane control and status */ +#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ +#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ +#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ +#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) /* Tx Desc plane TC Rate-scheduler config */ +#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) /* Rx Packet plane 
TC Rate-Scheduler Config */ +#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */ +#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */ +#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */ +#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */ +#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/ +#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */ +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ +#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ +#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ +#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ +#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ +#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ +#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ +#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ +#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ +#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ +#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ +#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ +#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* PCIe Parity Status Register */ +#define E1000_PCIEERRSTS 0x05BA8 + +#define E1000_PROXYS 0x5F64 /* Proxying Status */ +#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/*Energy Efficient Ethernet "EEE" registers */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define E1000_EEE_SU 0x0E34 /* EEE Setup */ +#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ +#define E1000_RLPIC 0x414C /* EEE Rx LPI Count - RLPIC */ + +/* OS2BMC Registers */ +#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */ +#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */ +#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */ +#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */ + +#endif diff --git a/lib/librte_pmd_igb/igb/e1000_vf.c b/lib/librte_pmd_igb/igb/e1000_vf.c new file mode 100644 index 0000000000..8b81e4bee0 --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_vf.c @@ -0,0 
+1,574 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + + +#include "e1000_api.h" + + +static s32 e1000_init_phy_params_vf(struct e1000_hw *hw); +static s32 e1000_init_nvm_params_vf(struct e1000_hw *hw); +static void e1000_release_vf(struct e1000_hw *hw); +static s32 e1000_acquire_vf(struct e1000_hw *hw); +static s32 e1000_setup_link_vf(struct e1000_hw *hw); +static s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw); +static s32 e1000_init_mac_params_vf(struct e1000_hw *hw); +static s32 e1000_check_for_link_vf(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_init_hw_vf(struct e1000_hw *hw); +static s32 e1000_reset_hw_vf(struct e1000_hw *hw); +static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32); +static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); +static s32 e1000_read_mac_addr_vf(struct e1000_hw *); + +/** + * e1000_init_phy_params_vf - Inits PHY params + * @hw: pointer to the HW structure + * + * Doesn't do much - there's no PHY available to the VF. + **/ +static s32 e1000_init_phy_params_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_phy_params_vf"); + hw->phy.type = e1000_phy_vf; + hw->phy.ops.acquire = e1000_acquire_vf; + hw->phy.ops.release = e1000_release_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_nvm_params_vf - Inits NVM params + * @hw: pointer to the HW structure + * + * Doesn't do much - there's no NVM available to the VF. 
+ **/ +static s32 e1000_init_nvm_params_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_nvm_params_vf"); + hw->nvm.type = e1000_nvm_none; + hw->nvm.ops.acquire = e1000_acquire_vf; + hw->nvm.ops.release = e1000_release_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_vf - Inits MAC params + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_vf(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_init_mac_params_vf"); + + /* Set media type */ + /* + * Virtual functions don't care what they're media type is as they + * have no direct access to the PHY, or the media. That is handled + * by the physical function driver. + */ + hw->phy.media_type = e1000_media_type_unknown; + + /* No ASF features for the VF driver */ + mac->asf_firmware_present = FALSE; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = FALSE; + /* Disable adaptive IFS mode so the generic funcs don't do anything */ + mac->adaptive_ifs = FALSE; + /* VF's have no MTA Registers - PF feature only */ + mac->mta_reg_count = 128; + /* VF's have no access to RAR entries */ + mac->rar_entry_count = 1; + + /* Function pointers */ + /* link setup */ + mac->ops.setup_link = e1000_setup_link_vf; + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_vf; + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_vf; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_vf; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_vf; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_vf; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; + /* set mac address */ + mac->ops.rar_set = e1000_rar_set_vf; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_vf; + + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_vf - Inits function pointers + * @hw: pointer to the HW structure + **/ +void e1000_init_function_pointers_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_vf"); + + hw->mac.ops.init_params = e1000_init_mac_params_vf; + hw->nvm.ops.init_params = e1000_init_nvm_params_vf; + hw->phy.ops.init_params = e1000_init_phy_params_vf; + hw->mbx.ops.init_params = e1000_init_mbx_params_vf; +} + +/** + * e1000_acquire_vf - Acquire rights to access PHY or NVM. + * @hw: pointer to the HW structure + * + * There is no PHY or NVM so we want all attempts to acquire these to fail. + * In addition, the MAC registers to access PHY/NVM don't exist so we don't + * even want any SW to attempt to use them. + **/ +static s32 e1000_acquire_vf(struct e1000_hw *hw) +{ + return -E1000_ERR_PHY; +} + +/** + * e1000_release_vf - Release PHY or NVM + * @hw: pointer to the HW structure + * + * There is no PHY or NVM so we want all attempts to acquire these to fail. + * In addition, the MAC registers to access PHY/NVM don't exist so we don't + * even want any SW to attempt to use them. + **/ +static void e1000_release_vf(struct e1000_hw *hw) +{ + return; +} + +/** + * e1000_setup_link_vf - Sets up link. + * @hw: pointer to the HW structure + * + * Virtual functions cannot change link. + **/ +static s32 e1000_setup_link_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_setup_link_vf"); + + return E1000_SUCCESS; +} + +/** + * e1000_get_bus_info_pcie_vf - Gets the bus info. + * @hw: pointer to the HW structure + * + * Virtual functions are not really on their own bus. 
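The init_params hooks above are deliberately thin: the VF has no PHY and no NVM, so only the MAC and mailbox tables carry real behaviour. As a rough sketch of how a consumer drives these hooks (the helper name setup_hw_example is invented here; in this patch the shared-code entry point e1000_setup_init_funcs(), called from igb_attach() in if_igb.c, plays essentially this role):

static s32 setup_hw_example(struct e1000_hw *hw)
{
	s32 ret_val;

	/* Install the per-interface init hooks defined in this file */
	e1000_init_function_pointers_vf(hw);

	/* MAC params: media type, ops table, single RAR entry, ... */
	ret_val = hw->mac.ops.init_params(hw);
	if (ret_val != E1000_SUCCESS)
		return ret_val;

	/* NVM and PHY params are stubs - the VF has access to neither */
	ret_val = hw->nvm.ops.init_params(hw);
	if (ret_val != E1000_SUCCESS)
		return ret_val;
	ret_val = hw->phy.ops.init_params(hw);
	if (ret_val != E1000_SUCCESS)
		return ret_val;

	/* The mailbox is the VF's only channel to the PF */
	return hw->mbx.ops.init_params(hw);
}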
+ **/ +static s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + DEBUGFUNC("e1000_get_bus_info_pcie_vf"); + + /* Do not set type PCI-E because we don't want disable master to run */ + bus->type = e1000_bus_type_reserved; + bus->speed = e1000_bus_speed_2500; + + return 0; +} + +/** + * e1000_get_link_up_info_vf - Gets link info. + * @hw: pointer to the HW structure + * @speed: pointer to 16 bit value to store link speed. + * @duplex: pointer to 16 bit value to store duplex. + * + * Since we cannot read the PHY and get accurate link info, we must rely upon + * the status register's data which is often stale and inaccurate. + **/ +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 status; + + DEBUGFUNC("e1000_get_link_up_info_vf"); + + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_vf - Resets the HW + * @hw: pointer to the HW structure + * + * VF's provide a function level reset. This is done using bit 26 of ctrl_reg. + * This is all the reset we can perform on a VF. + **/ +static s32 e1000_reset_hw_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 timeout = E1000_VF_INIT_TIMEOUT; + s32 ret_val = -E1000_ERR_MAC_INIT; + u32 ctrl, msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + + DEBUGFUNC("e1000_reset_hw_vf"); + + DEBUGOUT("Issuing a function level reset to MAC\n"); + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST); + + /* we cannot reset while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw, 0) && timeout) { + timeout--; + usec_delay(5); + } + + if (timeout) { + /* mailbox timeout can now become active */ + mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; + + msgbuf[0] = E1000_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1, 0); + + msec_delay(10); + + /* set our "perm_addr" based on info provided by PF */ + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + if (!ret_val) { + if (msgbuf[0] == (E1000_VF_RESET | + E1000_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, 6); + else + ret_val = -E1000_ERR_MAC_INIT; + } + } + + return ret_val; +} + +/** + * e1000_init_hw_vf - Inits the HW + * @hw: pointer to the HW structure + * + * Not much to do here except clear the PF Reset indication if there is one. 
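e1000_reset_hw_vf() above is the only reset a VF can perform: it sets the RST bit in CTRL and then completes a mailbox handshake in which the PF hands back the permanent MAC address. A minimal, illustrative decoder for that reply (the helper name is invented for this sketch):

/* Interpret the PF's answer to an E1000_VF_RESET message.  The PF acks by
 * echoing E1000_VF_RESET with E1000_VT_MSGTYPE_ACK set and places the
 * 6-byte permanent MAC address immediately after the message word. */
static s32 decode_vf_reset_reply_example(u32 *msgbuf, u8 *perm_addr)
{
	if (msgbuf[0] != (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK))
		return -E1000_ERR_MAC_INIT;

	memcpy(perm_addr, (u8 *)&msgbuf[1], 6);
	return E1000_SUCCESS;
}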
+ **/ +static s32 e1000_init_hw_vf(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_hw_vf"); + + /* attempt to set and restore our mac address */ + e1000_rar_set_vf(hw, hw->mac.addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_rar_set_vf - set device MAC address + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index receive address array register + **/ +static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, 12); + msgbuf[0] = E1000_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, 6); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + + msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS; + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) + e1000_read_mac_addr_vf(hw); +} + +/** + * e1000_hash_mc_addr_vf - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * The bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + u16 *hash_list = (u16 *)&msgbuf[1]; + u32 hash_value; + u32 i; + + DEBUGFUNC("e1000_update_mc_addr_list_vf"); + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. + * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. 
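To make the bit_shift computation above concrete, here is a worked example using the values this file actually sets (the sample multicast address is arbitrary):

/* e1000_init_mac_params_vf() sets mta_reg_count = 128, so
 *     hash_mask = 128 * 32 - 1 = 0xFFF
 * and bit_shift settles at 4, since 0xFFF >> 4 == 0xFF.
 * For the multicast MAC 01:00:5e:00:00:fb the hash is built from the two
 * high-order bytes, mc_addr[4] = 0x00 and mc_addr[5] = 0xfb:
 *     hash = 0xFFF & ((0x00 >> (8 - 4)) | (0xfb << 4)) = 0xFB0
 * i.e. the 12-bit value that is packed into the mailbox hash list below. */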
+ */ + + DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count); + + if (mc_addr_count > 30) { + msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW; + mc_addr_count = 30; + } + + msgbuf[0] = E1000_VF_SET_MULTICAST; + msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT; + + for (i = 0; i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); + DEBUGOUT1("Hash value = 0x%03X\n", hash_value); + hash_list[i] = hash_value & 0x0FFF; + mc_addr_list += ETH_ADDR_LEN; + } + + mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE, 0); +} + +/** + * e1000_vfta_set_vf - Set/Unset vlan filter table address + * @hw: pointer to the HW structure + * @vid: determines the vfta register and bit to set/unset + * @set: if TRUE then set bit, else clear bit + **/ +void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + + msgbuf[0] = E1000_VF_SET_VLAN; + msgbuf[1] = vid; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + if (set) + msgbuf[0] |= E1000_VF_SET_VLAN_ADD; + + mbx->ops.write_posted(hw, msgbuf, 2, 0); +} + +/** e1000_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + + msgbuf[0] = E1000_VF_SET_LPE; + msgbuf[1] = max_size; + + mbx->ops.write_posted(hw, msgbuf, 2, 0); +} + +/** + * e1000_promisc_set_vf - Set flags for Unicast or Multicast promisc + * @hw: pointer to the HW structure + * @uni: boolean indicating unicast promisc status + * @multi: boolean indicating multicast promisc status + **/ +s32 e1000_promisc_set_vf(struct e1000_hw *hw, enum e1000_promisc_type type) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf = E1000_VF_SET_PROMISC; + s32 ret_val; + + switch (type) { + case e1000_promisc_multicast: + msgbuf |= E1000_VF_SET_PROMISC_MULTICAST; + break; + case e1000_promisc_enabled: + msgbuf |= E1000_VF_SET_PROMISC_MULTICAST; + case e1000_promisc_unicast: + msgbuf |= E1000_VF_SET_PROMISC_UNICAST; + case e1000_promisc_disabled: + break; + default: + return -E1000_ERR_MAC_INIT; + } + + ret_val = mbx->ops.write_posted(hw, &msgbuf, 1, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, &msgbuf, 1, 0); + + if (!ret_val && !(msgbuf & E1000_VT_MSGTYPE_ACK)) + ret_val = -E1000_ERR_MAC_INIT; + + return ret_val; +} + +/** + * e1000_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) +{ + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_link_vf - Check for link for a virtual interface + * @hw: pointer to the HW structure + * + * Checks to see if the underlying PF is still talking to the VF and + * if it is then it reports the link state to the hardware, otherwise + * it reports link down and returns an error. + **/ +static s32 e1000_check_for_link_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u32 in_msg = 0; + + DEBUGFUNC("e1000_check_for_link_vf"); + + /* + * We only want to run this if there has been a rst asserted. 
+ * in this case that could mean a link change, device reset, + * or a virtual function reset + */ + + /* If we were hit with a reset or timeout drop the link */ + if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) + mac->get_link_status = TRUE; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) + goto out; + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error */ + if (mbx->ops.read(hw, &in_msg, 1, 0)) + goto out; + + /* if incoming message isn't clear to send we are waiting on response */ + if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { + /* message is not CTS and is NACK we have lost CTS status */ + if (in_msg & E1000_VT_MSGTYPE_NACK) + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* at this point we know the PF is talking to us, check and see if + * we are still accepting timeout or if we had a timeout failure. + * if we failed then we will need to reinit */ + if (!mbx->timeout) { + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link */ + mac->get_link_status = FALSE; + +out: + return ret_val; +} + diff --git a/lib/librte_pmd_igb/igb/e1000_vf.h b/lib/librte_pmd_igb/igb/e1000_vf.h new file mode 100644 index 0000000000..b2fd8a1a6d --- /dev/null +++ b/lib/librte_pmd_igb/igb/e1000_vf.h @@ -0,0 +1,294 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _E1000_VF_H_ +#define _E1000_VF_H_ + +#include "e1000_osdep.h" +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_DEV_ID_I350_VF 0x1520 + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* Additional Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +/* Interrupt Defines */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + ((_n) << 2)) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_IVAR_VALID 0x80 + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + u32 data; + struct { + /* RSS type, Packet type */ + u16 pkt_info; + /* Split Header, header buffer len */ + u16 hdr_info; + } hs_rss; + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define 
E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_vfadapt, + e1000_vfadapt_i350, + e1000_num_macs /* List is 1-based, so subtract 1 for TRUE count. */ +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +#include "e1000_mbx.h" + +struct e1000_mac_operations { + /* Function pointers for the MAC. */ + s32 (*init_params)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u16 mta_reg_count; + u16 rar_entry_count; + + bool get_link_status; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 *hw_addr; + u8 *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_mbx_info mbx; + + union { + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +enum e1000_promisc_type { + e1000_promisc_disabled = 0, /* all promisc modes disabled */ + e1000_promisc_unicast = 1, /* unicast promiscuous enabled */ + e1000_promisc_multicast = 2, /* multicast promiscuous enabled */ + e1000_promisc_enabled = 3, /* both uni and multicast promisc */ + e1000_num_promisc_types +}; + +/* These functions must be implemented by 
drivers */ +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_vfta_set_vf(struct e1000_hw *, u16, bool); +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type); +#endif /* _E1000_VF_H_ */ diff --git a/lib/librte_pmd_igb/igb/if_igb.c b/lib/librte_pmd_igb/igb/if_igb.c new file mode 100644 index 0000000000..4aa08f611b --- /dev/null +++ b/lib/librte_pmd_igb/igb/if_igb.c @@ -0,0 +1,5567 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
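The union e1000_adv_tx_desc and the E1000_ADVTXD_* masks defined in e1000_vf.h above describe the 82576/I350 advanced transmit descriptor layout. A minimal sketch of filling a single-buffer data descriptor (illustrative only: byte-order conversion, context descriptors and ring bookkeeping are omitted, and the helper name is invented):

static void fill_adv_tx_desc_example(union e1000_adv_tx_desc *txd,
    u64 dma_addr, u32 pkt_len)
{
	/* Advanced data descriptor for a one-buffer packet: end of packet,
	 * insert FCS, report status (DD) on completion. */
	u32 cmd_type_len = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
	    E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS |
	    E1000_ADVTXD_DCMD_RS | pkt_len;

	txd->read.buffer_addr = dma_addr;
	txd->read.cmd_type_len = cmd_type_len;
	/* Total payload length lives in the upper bits of olinfo_status */
	txd->read.olinfo_status = pkt_len << E1000_ADVTXD_PAYLEN_SHIFT;
}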
+ +******************************************************************************/ +/*$FreeBSD$*/ + + +#ifdef HAVE_KERNEL_OPTION_HEADERS +#include "opt_device_polling.h" +#include "opt_inet.h" +#include "opt_altq.h" +#endif + +#include +#include +#if __FreeBSD_version >= 800000 +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "e1000_api.h" +#include "e1000_82575.h" +#include "if_igb.h" + +/********************************************************************* + * Set this to one to display debug statistics + *********************************************************************/ +int igb_display_debug_stats = 0; + +/********************************************************************* + * Driver version: + *********************************************************************/ +char igb_driver_version[] = "version - 2.2.3"; + + +/********************************************************************* + * PCI Device ID Table + * + * Used by probe to select devices to load on + * Last field stores an index into e1000_strings + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } + *********************************************************************/ + +static igb_vendor_info_t igb_vendor_info_array[] = +{ + { 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, + PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, + PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82576, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD, + PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER, + PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, + PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82576_VF, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82580_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82580_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82580_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82580_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82580_COPPER_DUAL, + PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_82580_QUAD_FIBER, + PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_DH89XXCC_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_DH89XXCC_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_DH89XXCC_SFP, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, + PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_I350_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_I350_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_I350_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_I350_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, + { 0x8086, E1000_DEV_ID_I350_VF, PCI_ANY_ID, PCI_ANY_ID, 0}, + /* required last entry */ + { 0, 0, 0, 0, 0} +}; + 
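The probe table above must keep its all-zero row as the final entry: igb_probe() (further below) walks the array until it reaches a row whose vendor_id is 0. Purely as an illustration, supporting an additional part means adding a row ahead of that sentinel (E1000_DEV_ID_EXAMPLE_NEW_PART is a made-up placeholder, not a real define):

	{ 0x8086, E1000_DEV_ID_EXAMPLE_NEW_PART, PCI_ANY_ID, PCI_ANY_ID, 0},
	/* required last entry */
	{ 0, 0, 0, 0, 0}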
+/********************************************************************* + * Table of branding strings for all supported NICs. + *********************************************************************/ + +static char *igb_strings[] = { + "Intel(R) PRO/1000 Network Connection" +}; + +/********************************************************************* + * Function prototypes + *********************************************************************/ +static int igb_probe(device_t); +static int igb_attach(device_t); +static int igb_detach(device_t); +static int igb_shutdown(device_t); +static int igb_suspend(device_t); +static int igb_resume(device_t); +static void igb_start(struct ifnet *); +static void igb_start_locked(struct tx_ring *, struct ifnet *ifp); +#if __FreeBSD_version >= 800000 +static int igb_mq_start(struct ifnet *, struct mbuf *); +static int igb_mq_start_locked(struct ifnet *, + struct tx_ring *, struct mbuf *); +static void igb_qflush(struct ifnet *); +#endif +static int igb_ioctl(struct ifnet *, u_long, caddr_t); +static void igb_init(void *); +static void igb_init_locked(struct adapter *); +static void igb_stop(void *); +static void igb_media_status(struct ifnet *, struct ifmediareq *); +static int igb_media_change(struct ifnet *); +static void igb_identify_hardware(struct adapter *); +static int igb_allocate_pci_resources(struct adapter *); +static int igb_allocate_msix(struct adapter *); +static int igb_allocate_legacy(struct adapter *); +static int igb_setup_msix(struct adapter *); +static void igb_free_pci_resources(struct adapter *); +static void igb_local_timer(void *); +static void igb_reset(struct adapter *); +static int igb_setup_interface(device_t, struct adapter *); +static int igb_allocate_queues(struct adapter *); +static void igb_configure_queues(struct adapter *); + +static int igb_allocate_transmit_buffers(struct tx_ring *); +static void igb_setup_transmit_structures(struct adapter *); +static void igb_setup_transmit_ring(struct tx_ring *); +static void igb_initialize_transmit_units(struct adapter *); +static void igb_free_transmit_structures(struct adapter *); +static void igb_free_transmit_buffers(struct tx_ring *); + +static int igb_allocate_receive_buffers(struct rx_ring *); +static int igb_setup_receive_structures(struct adapter *); +static int igb_setup_receive_ring(struct rx_ring *); +static void igb_initialize_receive_units(struct adapter *); +static void igb_free_receive_structures(struct adapter *); +static void igb_free_receive_buffers(struct rx_ring *); +static void igb_free_receive_ring(struct rx_ring *); + +static void igb_enable_intr(struct adapter *); +static void igb_disable_intr(struct adapter *); +static void igb_update_stats_counters(struct adapter *); +static bool igb_txeof(struct tx_ring *); + +static __inline void igb_rx_discard(struct rx_ring *, int); +static __inline void igb_rx_input(struct rx_ring *, + struct ifnet *, struct mbuf *, u32); + +static bool igb_rxeof(struct igb_queue *, int, int *); +static void igb_rx_checksum(u32, struct mbuf *, u32); +static int igb_tx_ctx_setup(struct tx_ring *, struct mbuf *); +static bool igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *); +static void igb_set_promisc(struct adapter *); +static void igb_disable_promisc(struct adapter *); +static void igb_set_multi(struct adapter *); +static void igb_update_link_status(struct adapter *); +static void igb_refresh_mbufs(struct rx_ring *, int); + +static void igb_register_vlan(void *, struct ifnet *, u16); +static void igb_unregister_vlan(void *, 
struct ifnet *, u16); +static void igb_setup_vlan_hw_support(struct adapter *); + +static int igb_xmit(struct tx_ring *, struct mbuf **); +static int igb_dma_malloc(struct adapter *, bus_size_t, + struct igb_dma_alloc *, int); +static void igb_dma_free(struct adapter *, struct igb_dma_alloc *); +static int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); +static void igb_print_nvm_info(struct adapter *); +static int igb_is_valid_ether_addr(u8 *); +static void igb_add_hw_stats(struct adapter *); + +static void igb_vf_init_stats(struct adapter *); +static void igb_update_vf_stats_counters(struct adapter *); + +/* Management and WOL Support */ +static void igb_init_manageability(struct adapter *); +static void igb_release_manageability(struct adapter *); +static void igb_get_hw_control(struct adapter *); +static void igb_release_hw_control(struct adapter *); +static void igb_enable_wakeup(device_t); +static void igb_led_func(void *, int); + +static int igb_irq_fast(void *); +static void igb_msix_que(void *); +static void igb_msix_link(void *); +static void igb_handle_que(void *context, int pending); +static void igb_handle_link(void *context, int pending); + +static void igb_set_sysctl_value(struct adapter *, const char *, + const char *, int *, int); +static int igb_set_flowcntl(SYSCTL_HANDLER_ARGS); + +#ifdef DEVICE_POLLING +static poll_handler_t igb_poll; +#endif /* POLLING */ + +/********************************************************************* + * FreeBSD Device Interface Entry Points + *********************************************************************/ + +static device_method_t igb_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, igb_probe), + DEVMETHOD(device_attach, igb_attach), + DEVMETHOD(device_detach, igb_detach), + DEVMETHOD(device_shutdown, igb_shutdown), + DEVMETHOD(device_suspend, igb_suspend), + DEVMETHOD(device_resume, igb_resume), + {0, 0} +}; + +static driver_t igb_driver = { + "igb", igb_methods, sizeof(struct adapter), +}; + +static devclass_t igb_devclass; +DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0); +MODULE_DEPEND(igb, pci, 1, 1, 1); +MODULE_DEPEND(igb, ether, 1, 1, 1); + +/********************************************************************* + * Tunable default values. + *********************************************************************/ + +/* Descriptor defaults */ +static int igb_rxd = IGB_DEFAULT_RXD; +static int igb_txd = IGB_DEFAULT_TXD; +TUNABLE_INT("hw.igb.rxd", &igb_rxd); +TUNABLE_INT("hw.igb.txd", &igb_txd); + +/* +** AIM: Adaptive Interrupt Moderation +** which means that the interrupt rate +** is varied over time based on the +** traffic for that interrupt vector +*/ +static int igb_enable_aim = TRUE; +TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim); + +/* + * MSIX should be the default for best performance, + * but this allows it to be forced off for testing. + */ +static int igb_enable_msix = 1; +TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix); + +/* +** Tuneable Interrupt rate +*/ +static int igb_max_interrupt_rate = 8000; +TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate); + +/* +** Header split causes the packet header to +** be dma'd to a seperate mbuf from the payload. +** this can have memory alignment benefits. But +** another plus is that small packets often fit +** into the header and thus use no cluster. Its +** a very workload dependent type feature. 
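The tunables declared here are registered with TUNABLE_INT, so they are read from the kernel environment when the module is loaded. A hypothetical /boot/loader.conf fragment using the tunable names defined in this file (the values are examples only and still have to respect the IGB_MIN/IGB_MAX descriptor bounds checked in igb_attach()):

hw.igb.rxd="2048"
hw.igb.txd="2048"
hw.igb.enable_aim="1"
hw.igb.enable_msix="1"
hw.igb.max_interrupt_rate="8000"
hw.igb.hdr_split="0"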
+*/ +static bool igb_header_split = FALSE; +TUNABLE_INT("hw.igb.hdr_split", &igb_header_split); + +/* +** This will autoconfigure based on +** the number of CPUs if left at 0. +*/ +static int igb_num_queues = 0; +TUNABLE_INT("hw.igb.num_queues", &igb_num_queues); + +/* How many packets rxeof tries to clean at a time */ +static int igb_rx_process_limit = 100; +TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit); + +/* Flow control setting - default to FULL */ +static int igb_fc_setting = e1000_fc_full; +TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting); + +/* Energy Efficient Ethernet - default to off */ +static int igb_eee_disabled = TRUE; +TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled); + +/* +** DMA Coalescing, only for i350 - default to off, +** this feature is for power savings +*/ +static int igb_dma_coalesce = FALSE; +TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce); + +/********************************************************************* + * Device identification routine + * + * igb_probe determines if the driver should be loaded on + * adapter based on PCI vendor/device id of the adapter. + * + * return BUS_PROBE_DEFAULT on success, positive on failure + *********************************************************************/ + +static int +igb_probe(device_t dev) +{ + char adapter_name[60]; + uint16_t pci_vendor_id = 0; + uint16_t pci_device_id = 0; + uint16_t pci_subvendor_id = 0; + uint16_t pci_subdevice_id = 0; + igb_vendor_info_t *ent; + + INIT_DEBUGOUT("igb_probe: begin"); + + pci_vendor_id = pci_get_vendor(dev); + if (pci_vendor_id != IGB_VENDOR_ID) + return (ENXIO); + + pci_device_id = pci_get_device(dev); + pci_subvendor_id = pci_get_subvendor(dev); + pci_subdevice_id = pci_get_subdevice(dev); + + ent = igb_vendor_info_array; + while (ent->vendor_id != 0) { + if ((pci_vendor_id == ent->vendor_id) && + (pci_device_id == ent->device_id) && + + ((pci_subvendor_id == ent->subvendor_id) || + (ent->subvendor_id == PCI_ANY_ID)) && + + ((pci_subdevice_id == ent->subdevice_id) || + (ent->subdevice_id == PCI_ANY_ID))) { + sprintf(adapter_name, "%s %s", + igb_strings[ent->index], + igb_driver_version); + device_set_desc_copy(dev, adapter_name); + return (BUS_PROBE_DEFAULT); + } + ent++; + } + + return (ENXIO); +} + +/********************************************************************* + * Device initialization routine + * + * The attach entry point is called when the driver is being loaded. + * This routine identifies the type of hardware, allocates all resources + * and initializes the hardware. 
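igb_probe() above accepts a device when the vendor and device IDs match a table row exactly and the subvendor/subdevice fields either match or are the PCI_ANY_ID wildcard. Restated as a small predicate (sketch only; the helper name is invented):

static int igb_entry_matches_example(const igb_vendor_info_t *ent,
    uint16_t vid, uint16_t did, uint16_t svid, uint16_t sdid)
{
	return (vid == ent->vendor_id &&
	    did == ent->device_id &&
	    (svid == ent->subvendor_id || ent->subvendor_id == PCI_ANY_ID) &&
	    (sdid == ent->subdevice_id || ent->subdevice_id == PCI_ANY_ID));
}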
+ * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +igb_attach(device_t dev) +{ + struct adapter *adapter; + int error = 0; + u16 eeprom_data; + + INIT_DEBUGOUT("igb_attach: begin"); + + adapter = device_get_softc(dev); + adapter->dev = adapter->osdep.dev = dev; + IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); + + /* SYSCTL stuff */ + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, + igb_sysctl_nvm_info, "I", "NVM Information"); + + SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW, + &igb_enable_aim, 1, "Interrupt Moderation"); + + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW, + adapter, 0, igb_set_flowcntl, "I", "Flow Control"); + + callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); + + /* Determine hardware and mac info */ + igb_identify_hardware(adapter); + + /* Setup PCI resources */ + if (igb_allocate_pci_resources(adapter)) { + device_printf(dev, "Allocation of PCI resources failed\n"); + error = ENXIO; + goto err_pci; + } + + /* Do Shared Code initialization */ + if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { + device_printf(dev, "Setup of Shared code failed\n"); + error = ENXIO; + goto err_pci; + } + + e1000_get_bus_info(&adapter->hw); + + /* Sysctl for limiting the amount of work done in the taskqueue */ + igb_set_sysctl_value(adapter, "rx_processing_limit", + "max number of rx packets to process", &adapter->rx_process_limit, + igb_rx_process_limit); + + /* + * Validate number of transmit and receive descriptors. It + * must not exceed hardware maximum, and must be multiple + * of E1000_DBA_ALIGN. + */ + if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 || + (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) { + device_printf(dev, "Using %d TX descriptors instead of %d!\n", + IGB_DEFAULT_TXD, igb_txd); + adapter->num_tx_desc = IGB_DEFAULT_TXD; + } else + adapter->num_tx_desc = igb_txd; + if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 || + (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) { + device_printf(dev, "Using %d RX descriptors instead of %d!\n", + IGB_DEFAULT_RXD, igb_rxd); + adapter->num_rx_desc = IGB_DEFAULT_RXD; + } else + adapter->num_rx_desc = igb_rxd; + + adapter->hw.mac.autoneg = DO_AUTO_NEG; + adapter->hw.phy.autoneg_wait_to_complete = FALSE; + adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; + + /* Copper options */ + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = FALSE; + adapter->hw.phy.ms_type = IGB_MASTER_SLAVE; + } + + /* + * Set the frame limits assuming + * standard ethernet sized frames. 
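For reference, with the stock FreeBSD/shared-code values (ETHERMTU = 1500, ETHER_HDR_LEN = 14, ETHERNET_FCS_SIZE = 4 and ETH_ZLEN = 60, assumed here since they are not defined in this hunk), the frame limits set just below work out to:

	max_frame_size = 1500 + 14 + 4 = 1518 bytes
	min_frame_size = 60 + 4 = 64 bytes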
+ */ + adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; + adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE; + + /* + ** Allocate and Setup Queues + */ + if (igb_allocate_queues(adapter)) { + error = ENOMEM; + goto err_pci; + } + + /* Allocate the appropriate stats memory */ + if (adapter->vf_ifp) { + adapter->stats = + (struct e1000_vf_stats *)malloc(sizeof \ + (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO); + igb_vf_init_stats(adapter); + } else + adapter->stats = + (struct e1000_hw_stats *)malloc(sizeof \ + (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO); + if (adapter->stats == NULL) { + device_printf(dev, "Can not allocate stats memory\n"); + error = ENOMEM; + goto err_late; + } + + /* Allocate multicast array memory. */ + adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * + MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); + if (adapter->mta == NULL) { + device_printf(dev, "Can not allocate multicast setup array\n"); + error = ENOMEM; + goto err_late; + } + + /* Some adapter-specific advanced features */ + if (adapter->hw.mac.type >= e1000_i350) { + igb_set_sysctl_value(adapter, "dma_coalesce", + "configure dma coalesce", + &adapter->dma_coalesce, igb_dma_coalesce); + igb_set_sysctl_value(adapter, "eee_disabled", + "enable Energy Efficient Ethernet", + &adapter->hw.dev_spec._82575.eee_disable, + igb_eee_disabled); + e1000_set_eee_i350(&adapter->hw); + } + + /* + ** Start from a known state, this is + ** important in reading the nvm and + ** mac from that. + */ + e1000_reset_hw(&adapter->hw); + + /* Make sure we have a good EEPROM before we read from it */ + if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { + /* + ** Some PCI-E parts fail the first check due to + ** the link being in sleep state, call it again, + ** if it fails a second time its a real issue. 
+ */ + if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { + device_printf(dev, + "The EEPROM Checksum Is Not Valid\n"); + error = EIO; + goto err_late; + } + } + + /* + ** Copy the permanent MAC address out of the EEPROM + */ + if (e1000_read_mac_addr(&adapter->hw) < 0) { + device_printf(dev, "EEPROM read error while reading MAC" + " address\n"); + error = EIO; + goto err_late; + } + /* Check its sanity */ + if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) { + device_printf(dev, "Invalid MAC address\n"); + error = EIO; + goto err_late; + } + + /* + ** Configure Interrupts + */ + if ((adapter->msix > 1) && (igb_enable_msix)) + error = igb_allocate_msix(adapter); + else /* MSI or Legacy */ + error = igb_allocate_legacy(adapter); + if (error) + goto err_late; + + /* Setup OS specific network interface */ + if (igb_setup_interface(dev, adapter) != 0) + goto err_late; + + /* Now get a good starting state */ + igb_reset(adapter); + + /* Initialize statistics */ + igb_update_stats_counters(adapter); + + adapter->hw.mac.get_link_status = 1; + igb_update_link_status(adapter); + + /* Indicate SOL/IDER usage */ + if (e1000_check_reset_block(&adapter->hw)) + device_printf(dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* Determine if we have to control management hardware */ + adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); + + /* + * Setup Wake-on-Lan + */ + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME; + if (eeprom_data) + adapter->wol = E1000_WUFC_MAG; + + /* Register for VLAN events */ + adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, + igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); + adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, + igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); + + igb_add_hw_stats(adapter); + + /* Tell the stack that the interface is not active */ + adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + + adapter->led_dev = led_create(igb_led_func, adapter, + device_get_nameunit(dev)); + + INIT_DEBUGOUT("igb_attach: end"); + + return (0); + +err_late: + igb_free_transmit_structures(adapter); + igb_free_receive_structures(adapter); + igb_release_hw_control(adapter); + if (adapter->ifp != NULL) + if_free(adapter->ifp); +err_pci: + igb_free_pci_resources(adapter); + free(adapter->mta, M_DEVBUF); + IGB_CORE_LOCK_DESTROY(adapter); + + return (error); +} + +/********************************************************************* + * Device removal routine + * + * The detach entry point is called when the driver is being removed. + * This routine stops the adapter and deallocates all the resources + * that were allocated for driver operation. 
+ * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +igb_detach(device_t dev) +{ + struct adapter *adapter = device_get_softc(dev); + struct ifnet *ifp = adapter->ifp; + + INIT_DEBUGOUT("igb_detach: begin"); + + /* Make sure VLANS are not using driver */ + if (adapter->ifp->if_vlantrunk != NULL) { + device_printf(dev,"Vlan in use, detach first\n"); + return (EBUSY); + } + + if (adapter->led_dev != NULL) + led_destroy(adapter->led_dev); + +#ifdef DEVICE_POLLING + if (ifp->if_capenable & IFCAP_POLLING) + ether_poll_deregister(ifp); +#endif + + IGB_CORE_LOCK(adapter); + adapter->in_detach = 1; + igb_stop(adapter); + IGB_CORE_UNLOCK(adapter); + + e1000_phy_hw_reset(&adapter->hw); + + /* Give control back to firmware */ + igb_release_manageability(adapter); + igb_release_hw_control(adapter); + + if (adapter->wol) { + E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); + E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); + igb_enable_wakeup(dev); + } + + /* Unregister VLAN events */ + if (adapter->vlan_attach != NULL) + EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); + if (adapter->vlan_detach != NULL) + EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); + + ether_ifdetach(adapter->ifp); + + callout_drain(&adapter->timer); + + igb_free_pci_resources(adapter); + bus_generic_detach(dev); + if_free(ifp); + + igb_free_transmit_structures(adapter); + igb_free_receive_structures(adapter); + free(adapter->mta, M_DEVBUF); + + IGB_CORE_LOCK_DESTROY(adapter); + + return (0); +} + +/********************************************************************* + * + * Shutdown entry point + * + **********************************************************************/ + +static int +igb_shutdown(device_t dev) +{ + return igb_suspend(dev); +} + +/* + * Suspend/resume device methods. + */ +static int +igb_suspend(device_t dev) +{ + struct adapter *adapter = device_get_softc(dev); + + IGB_CORE_LOCK(adapter); + + igb_stop(adapter); + + igb_release_manageability(adapter); + igb_release_hw_control(adapter); + + if (adapter->wol) { + E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); + E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); + igb_enable_wakeup(dev); + } + + IGB_CORE_UNLOCK(adapter); + + return bus_generic_suspend(dev); +} + +static int +igb_resume(device_t dev) +{ + struct adapter *adapter = device_get_softc(dev); + struct ifnet *ifp = adapter->ifp; + + IGB_CORE_LOCK(adapter); + igb_init_locked(adapter); + igb_init_manageability(adapter); + + if ((ifp->if_flags & IFF_UP) && + (ifp->if_drv_flags & IFF_DRV_RUNNING)) + igb_start(ifp); + + IGB_CORE_UNLOCK(adapter); + + return bus_generic_resume(dev); +} + + +/********************************************************************* + * Transmit entry point + * + * igb_start is called by the stack to initiate a transmit. + * The driver will remain in this routine as long as there are + * packets to transmit and transmit resources are available. + * In case resources are not available stack is notified and + * the packet is requeued. 
+ **********************************************************************/ + +static void +igb_start_locked(struct tx_ring *txr, struct ifnet *ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct mbuf *m_head; + + IGB_TX_LOCK_ASSERT(txr); + + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING) + return; + if (!adapter->link_active) + return; + + /* Call cleanup if number of TX descriptors low */ + if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD) + igb_txeof(txr); + + while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { + if (txr->tx_avail <= IGB_MAX_SCATTER) { + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } + IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); + if (m_head == NULL) + break; + /* + * Encapsulation can modify our pointer, and or make it + * NULL on failure. In that event, we can't requeue. + */ + if (igb_xmit(txr, &m_head)) { + if (m_head == NULL) + break; + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + IFQ_DRV_PREPEND(&ifp->if_snd, m_head); + break; + } + + /* Send a copy of the frame to the BPF listener */ + ETHER_BPF_MTAP(ifp, m_head); + + /* Set watchdog on */ + txr->watchdog_time = ticks; + txr->queue_status = IGB_QUEUE_WORKING; + } +} + +/* + * Legacy TX driver routine, called from the + * stack, always uses tx[0], and spins for it. + * Should not be used with multiqueue tx + */ +static void +igb_start(struct ifnet *ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct tx_ring *txr = adapter->tx_rings; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IGB_TX_LOCK(txr); + igb_start_locked(txr, ifp); + IGB_TX_UNLOCK(txr); + } + return; +} + +#if __FreeBSD_version >= 800000 +/* +** Multiqueue Transmit driver +** +*/ +static int +igb_mq_start(struct ifnet *ifp, struct mbuf *m) +{ + struct adapter *adapter = ifp->if_softc; + struct igb_queue *que; + struct tx_ring *txr; + int i = 0, err = 0; + + /* Which queue to use */ + if ((m->m_flags & M_FLOWID) != 0) + i = m->m_pkthdr.flowid % adapter->num_queues; + + txr = &adapter->tx_rings[i]; + que = &adapter->queues[i]; + + if (IGB_TX_TRYLOCK(txr)) { + err = igb_mq_start_locked(ifp, txr, m); + IGB_TX_UNLOCK(txr); + } else { + err = drbr_enqueue(ifp, txr->br, m); + taskqueue_enqueue(que->tq, &que->que_task); + } + + return (err); +} + +static int +igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m) +{ + struct adapter *adapter = txr->adapter; + struct mbuf *next; + int err = 0, enq; + + IGB_TX_LOCK_ASSERT(txr); + + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING || adapter->link_active == 0) { + if (m != NULL) + err = drbr_enqueue(ifp, txr->br, m); + return (err); + } + + enq = 0; + if (m == NULL) { + next = drbr_dequeue(ifp, txr->br); + } else if (drbr_needs_enqueue(ifp, txr->br)) { + if ((err = drbr_enqueue(ifp, txr->br, m)) != 0) + return (err); + next = drbr_dequeue(ifp, txr->br); + } else + next = m; + + /* Process the queue */ + while (next != NULL) { + if ((err = igb_xmit(txr, &next)) != 0) { + if (next != NULL) + err = drbr_enqueue(ifp, txr->br, next); + break; + } + enq++; + drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags); + ETHER_BPF_MTAP(ifp, next); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + break; + if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD) + igb_txeof(txr); + if (txr->tx_avail <= IGB_MAX_SCATTER) { + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } + next = drbr_dequeue(ifp, txr->br); + } + if (enq > 0) { + /* Set the watchdog */ + txr->queue_status = IGB_QUEUE_WORKING; + txr->watchdog_time = ticks; + } + return (err); +} 
+ +/* +** Flush all ring buffers +*/ +static void +igb_qflush(struct ifnet *ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct tx_ring *txr = adapter->tx_rings; + struct mbuf *m; + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + IGB_TX_LOCK(txr); + while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) + m_freem(m); + IGB_TX_UNLOCK(txr); + } + if_qflush(ifp); +} +#endif /* __FreeBSD_version >= 800000 */ + +/********************************************************************* + * Ioctl entry point + * + * igb_ioctl is called when the user wants to configure the + * interface. + * + * return 0 on success, positive on failure + **********************************************************************/ + +static int +igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) +{ + struct adapter *adapter = ifp->if_softc; + struct ifreq *ifr = (struct ifreq *)data; +#ifdef INET + struct ifaddr *ifa = (struct ifaddr *)data; +#endif + int error = 0; + + if (adapter->in_detach) + return (error); + + switch (command) { + case SIOCSIFADDR: +#ifdef INET + if (ifa->ifa_addr->sa_family == AF_INET) { + /* + * XXX + * Since resetting hardware takes a very long time + * and results in link renegotiation we only + * initialize the hardware only when it is absolutely + * required. + */ + ifp->if_flags |= IFF_UP; + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { + IGB_CORE_LOCK(adapter); + igb_init_locked(adapter); + IGB_CORE_UNLOCK(adapter); + } + if (!(ifp->if_flags & IFF_NOARP)) + arp_ifinit(ifp, ifa); + } else +#endif + error = ether_ioctl(ifp, command, data); + break; + case SIOCSIFMTU: + { + int max_frame_size; + + IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); + + IGB_CORE_LOCK(adapter); + max_frame_size = 9234; + if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - + ETHER_CRC_LEN) { + IGB_CORE_UNLOCK(adapter); + error = EINVAL; + break; + } + + ifp->if_mtu = ifr->ifr_mtu; + adapter->max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + igb_init_locked(adapter); + IGB_CORE_UNLOCK(adapter); + break; + } + case SIOCSIFFLAGS: + IOCTL_DEBUGOUT("ioctl rcv'd:\ + SIOCSIFFLAGS (Set Interface Flags)"); + IGB_CORE_LOCK(adapter); + if (ifp->if_flags & IFF_UP) { + if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { + if ((ifp->if_flags ^ adapter->if_flags) & + (IFF_PROMISC | IFF_ALLMULTI)) { + igb_disable_promisc(adapter); + igb_set_promisc(adapter); + } + } else + igb_init_locked(adapter); + } else + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + igb_stop(adapter); + adapter->if_flags = ifp->if_flags; + IGB_CORE_UNLOCK(adapter); + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IGB_CORE_LOCK(adapter); + igb_disable_intr(adapter); + igb_set_multi(adapter); +#ifdef DEVICE_POLLING + if (!(ifp->if_capenable & IFCAP_POLLING)) +#endif + igb_enable_intr(adapter); + IGB_CORE_UNLOCK(adapter); + } + break; + case SIOCSIFMEDIA: + /* + ** As the speed/duplex settings are being + ** changed, we need toreset the PHY. 
+ */ + adapter->hw.phy.reset_disable = FALSE; + /* Check SOL/IDER usage */ + IGB_CORE_LOCK(adapter); + if (e1000_check_reset_block(&adapter->hw)) { + IGB_CORE_UNLOCK(adapter); + device_printf(adapter->dev, "Media change is" + " blocked due to SOL/IDER session.\n"); + break; + } + IGB_CORE_UNLOCK(adapter); + case SIOCGIFMEDIA: + IOCTL_DEBUGOUT("ioctl rcv'd: \ + SIOCxIFMEDIA (Get/Set Interface Media)"); + error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); + break; + case SIOCSIFCAP: + { + int mask, reinit; + + IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); + reinit = 0; + mask = ifr->ifr_reqcap ^ ifp->if_capenable; +#ifdef DEVICE_POLLING + if (mask & IFCAP_POLLING) { + if (ifr->ifr_reqcap & IFCAP_POLLING) { + error = ether_poll_register(igb_poll, ifp); + if (error) + return (error); + IGB_CORE_LOCK(adapter); + igb_disable_intr(adapter); + ifp->if_capenable |= IFCAP_POLLING; + IGB_CORE_UNLOCK(adapter); + } else { + error = ether_poll_deregister(ifp); + /* Enable interrupt even in error case */ + IGB_CORE_LOCK(adapter); + igb_enable_intr(adapter); + ifp->if_capenable &= ~IFCAP_POLLING; + IGB_CORE_UNLOCK(adapter); + } + } +#endif + if (mask & IFCAP_HWCSUM) { + ifp->if_capenable ^= IFCAP_HWCSUM; + reinit = 1; + } + if (mask & IFCAP_TSO4) { + ifp->if_capenable ^= IFCAP_TSO4; + reinit = 1; + } + if (mask & IFCAP_VLAN_HWTAGGING) { + ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; + reinit = 1; + } + if (mask & IFCAP_VLAN_HWFILTER) { + ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; + reinit = 1; + } + if (mask & IFCAP_LRO) { + ifp->if_capenable ^= IFCAP_LRO; + reinit = 1; + } + if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) + igb_init(adapter); + VLAN_CAPABILITIES(ifp); + break; + } + + default: + error = ether_ioctl(ifp, command, data); + break; + } + + return (error); +} + + +/********************************************************************* + * Init entry point + * + * This routine is used in two ways. It is used by the stack as + * init entry point in network interface structure. It is also used + * by the driver as a hw/sw initialization routine to get to a + * consistent state. 
+ * + * return 0 on success, positive on failure + **********************************************************************/ + +static void +igb_init_locked(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + device_t dev = adapter->dev; + + INIT_DEBUGOUT("igb_init: begin"); + + IGB_CORE_LOCK_ASSERT(adapter); + + igb_disable_intr(adapter); + callout_stop(&adapter->timer); + + /* Get the latest mac address, User can use a LAA */ + bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr, + ETHER_ADDR_LEN); + + /* Put the address into the Receive Address Array */ + e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + igb_reset(adapter); + igb_update_link_status(adapter); + + E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); + + /* Set hardware offload abilities */ + ifp->if_hwassist = 0; + if (ifp->if_capenable & IFCAP_TXCSUM) { + ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); +#if __FreeBSD_version >= 800000 + if (adapter->hw.mac.type == e1000_82576) + ifp->if_hwassist |= CSUM_SCTP; +#endif + } + + if (ifp->if_capenable & IFCAP_TSO4) + ifp->if_hwassist |= CSUM_TSO; + + /* Configure for OS presence */ + igb_init_manageability(adapter); + + /* Prepare transmit descriptors and buffers */ + igb_setup_transmit_structures(adapter); + igb_initialize_transmit_units(adapter); + + /* Setup Multicast table */ + igb_set_multi(adapter); + + /* + ** Figure out the desired mbuf pool + ** for doing jumbo/packetsplit + */ + if (adapter->max_frame_size <= 2048) + adapter->rx_mbuf_sz = MCLBYTES; + else if (adapter->max_frame_size <= 4096) + adapter->rx_mbuf_sz = MJUMPAGESIZE; + else + adapter->rx_mbuf_sz = MJUM9BYTES; + + /* Prepare receive descriptors and buffers */ + if (igb_setup_receive_structures(adapter)) { + device_printf(dev, "Could not setup receive structures\n"); + return; + } + igb_initialize_receive_units(adapter); + + /* Enable VLAN support */ + if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) + igb_setup_vlan_hw_support(adapter); + + /* Don't lose promiscuous settings */ + igb_set_promisc(adapter); + + ifp->if_drv_flags |= IFF_DRV_RUNNING; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + + callout_reset(&adapter->timer, hz, igb_local_timer, adapter); + e1000_clear_hw_cntrs_base_generic(&adapter->hw); + + if (adapter->msix > 1) /* Set up queue routing */ + igb_configure_queues(adapter); + + /* this clears any pending interrupts */ + E1000_READ_REG(&adapter->hw, E1000_ICR); +#ifdef DEVICE_POLLING + /* + * Only enable interrupts if we are not polling, make sure + * they are off otherwise. 
+ */ + if (ifp->if_capenable & IFCAP_POLLING) + igb_disable_intr(adapter); + else +#endif /* DEVICE_POLLING */ + { + igb_enable_intr(adapter); + E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC); + } + + /* Set Energy Efficient Ethernet */ + e1000_set_eee_i350(&adapter->hw); + + /* Don't reset the phy next time init gets called */ + adapter->hw.phy.reset_disable = TRUE; +} + +static void +igb_init(void *arg) +{ + struct adapter *adapter = arg; + + IGB_CORE_LOCK(adapter); + igb_init_locked(adapter); + IGB_CORE_UNLOCK(adapter); +} + + +static void +igb_handle_que(void *context, int pending) +{ + struct igb_queue *que = context; + struct adapter *adapter = que->adapter; + struct tx_ring *txr = que->txr; + struct ifnet *ifp = adapter->ifp; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + bool more; + + more = igb_rxeof(que, -1, NULL); + + IGB_TX_LOCK(txr); + if (igb_txeof(txr)) + more = TRUE; +#if __FreeBSD_version >= 800000 + if (!drbr_empty(ifp, txr->br)) + igb_mq_start_locked(ifp, txr, NULL); +#else + igb_start_locked(txr, ifp); +#endif + IGB_TX_UNLOCK(txr); + if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) { + taskqueue_enqueue(que->tq, &que->que_task); + return; + } + } + +#ifdef DEVICE_POLLING + if (ifp->if_capenable & IFCAP_POLLING) + return; +#endif + /* Reenable this interrupt */ + if (que->eims) + E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims); + else + igb_enable_intr(adapter); +} + +/* Deal with link in a sleepable context */ +static void +igb_handle_link(void *context, int pending) +{ + struct adapter *adapter = context; + + adapter->hw.mac.get_link_status = 1; + igb_update_link_status(adapter); +} + +/********************************************************************* + * + * MSI/Legacy Deferred + * Interrupt Service routine + * + *********************************************************************/ +static int +igb_irq_fast(void *arg) +{ + struct adapter *adapter = arg; + struct igb_queue *que = adapter->queues; + u32 reg_icr; + + + reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); + + /* Hot eject? */ + if (reg_icr == 0xffffffff) + return FILTER_STRAY; + + /* Definitely not our interrupt. */ + if (reg_icr == 0x0) + return FILTER_STRAY; + + if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) + return FILTER_STRAY; + + /* + * Mask interrupts until the taskqueue is finished running. This is + * cheap, just assume that it is needed. This also works around the + * MSI message reordering errata on certain systems. + */ + igb_disable_intr(adapter); + taskqueue_enqueue(que->tq, &que->que_task); + + /* Link status change */ + if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) + taskqueue_enqueue(que->tq, &adapter->link_task); + + if (reg_icr & E1000_ICR_RXO) + adapter->rx_overruns++; + return FILTER_HANDLED; +} + +#ifdef DEVICE_POLLING +/********************************************************************* + * + * Legacy polling routine : if using this code you MUST be sure that + * multiqueue is not defined, ie, set igb_num_queues to 1. 
+ * + *********************************************************************/ +#if __FreeBSD_version >= 800000 +#define POLL_RETURN_COUNT(a) (a) +static int +#else +#define POLL_RETURN_COUNT(a) +static void +#endif +igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) +{ + struct adapter *adapter = ifp->if_softc; + struct igb_queue *que = adapter->queues; + struct tx_ring *txr = adapter->tx_rings; + u32 reg_icr, rx_done = 0; + u32 loop = IGB_MAX_LOOP; + bool more; + + IGB_CORE_LOCK(adapter); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { + IGB_CORE_UNLOCK(adapter); + return POLL_RETURN_COUNT(rx_done); + } + + if (cmd == POLL_AND_CHECK_STATUS) { + reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); + /* Link status change */ + if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) + igb_handle_link(adapter, 0); + + if (reg_icr & E1000_ICR_RXO) + adapter->rx_overruns++; + } + IGB_CORE_UNLOCK(adapter); + + igb_rxeof(que, count, &rx_done); + + IGB_TX_LOCK(txr); + do { + more = igb_txeof(txr); + } while (loop-- && more); +#if __FreeBSD_version >= 800000 + if (!drbr_empty(ifp, txr->br)) + igb_mq_start_locked(ifp, txr, NULL); +#else + igb_start_locked(txr, ifp); +#endif + IGB_TX_UNLOCK(txr); + return POLL_RETURN_COUNT(rx_done); +} +#endif /* DEVICE_POLLING */ + +/********************************************************************* + * + * MSIX TX Interrupt Service routine + * + **********************************************************************/ +static void +igb_msix_que(void *arg) +{ + struct igb_queue *que = arg; + struct adapter *adapter = que->adapter; + struct tx_ring *txr = que->txr; + struct rx_ring *rxr = que->rxr; + u32 newitr = 0; + bool more_tx, more_rx; + + E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims); + ++que->irqs; + + IGB_TX_LOCK(txr); + more_tx = igb_txeof(txr); + IGB_TX_UNLOCK(txr); + + more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL); + + if (igb_enable_aim == FALSE) + goto no_calc; + /* + ** Do Adaptive Interrupt Moderation: + ** - Write out last calculated setting + ** - Calculate based on average size over + ** the last interval. 
+ */ + if (que->eitr_setting) + E1000_WRITE_REG(&adapter->hw, + E1000_EITR(que->msix), que->eitr_setting); + + que->eitr_setting = 0; + + /* Idle, do nothing */ + if ((txr->bytes == 0) && (rxr->bytes == 0)) + goto no_calc; + + /* Used half Default if sub-gig */ + if (adapter->link_speed != 1000) + newitr = IGB_DEFAULT_ITR / 2; + else { + if ((txr->bytes) && (txr->packets)) + newitr = txr->bytes/txr->packets; + if ((rxr->bytes) && (rxr->packets)) + newitr = max(newitr, + (rxr->bytes / rxr->packets)); + newitr += 24; /* account for hardware frame, crc */ + /* set an upper boundary */ + newitr = min(newitr, 3000); + /* Be nice to the mid range */ + if ((newitr > 300) && (newitr < 1200)) + newitr = (newitr / 3); + else + newitr = (newitr / 2); + } + newitr &= 0x7FFC; /* Mask invalid bits */ + if (adapter->hw.mac.type == e1000_82575) + newitr |= newitr << 16; + else + newitr |= E1000_EITR_CNT_IGNR; + + /* save for next interrupt */ + que->eitr_setting = newitr; + + /* Reset state */ + txr->bytes = 0; + txr->packets = 0; + rxr->bytes = 0; + rxr->packets = 0; + +no_calc: + /* Schedule a clean task if needed*/ + if (more_tx || more_rx || + (adapter->ifp->if_drv_flags & IFF_DRV_OACTIVE)) + taskqueue_enqueue(que->tq, &que->que_task); + else + /* Reenable this interrupt */ + E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims); + return; +} + + +/********************************************************************* + * + * MSIX Link Interrupt Service routine + * + **********************************************************************/ + +static void +igb_msix_link(void *arg) +{ + struct adapter *adapter = arg; + u32 icr; + + ++adapter->link_irq; + icr = E1000_READ_REG(&adapter->hw, E1000_ICR); + if (!(icr & E1000_ICR_LSC)) + goto spurious; + igb_handle_link(adapter, 0); + +spurious: + /* Rearm */ + E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC); + E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask); + return; +} + + +/********************************************************************* + * + * Media Ioctl callback + * + * This routine is called whenever the user queries the status of + * the interface using ifconfig. + * + **********************************************************************/ +static void +igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) +{ + struct adapter *adapter = ifp->if_softc; + u_char fiber_type = IFM_1000_SX; + + INIT_DEBUGOUT("igb_media_status: begin"); + + IGB_CORE_LOCK(adapter); + igb_update_link_status(adapter); + + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; + + if (!adapter->link_active) { + IGB_CORE_UNLOCK(adapter); + return; + } + + ifmr->ifm_status |= IFM_ACTIVE; + + if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || + (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) + ifmr->ifm_active |= fiber_type | IFM_FDX; + else { + switch (adapter->link_speed) { + case 10: + ifmr->ifm_active |= IFM_10_T; + break; + case 100: + ifmr->ifm_active |= IFM_100_TX; + break; + case 1000: + ifmr->ifm_active |= IFM_1000_T; + break; + } + if (adapter->link_duplex == FULL_DUPLEX) + ifmr->ifm_active |= IFM_FDX; + else + ifmr->ifm_active |= IFM_HDX; + } + IGB_CORE_UNLOCK(adapter); +} + +/********************************************************************* + * + * Media Ioctl callback + * + * This routine is called when the user changes speed/duplex using + * media/mediopt option with ifconfig. 
+ * + **********************************************************************/ +static int +igb_media_change(struct ifnet *ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct ifmedia *ifm = &adapter->media; + + INIT_DEBUGOUT("igb_media_change: begin"); + + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) + return (EINVAL); + + IGB_CORE_LOCK(adapter); + switch (IFM_SUBTYPE(ifm->ifm_media)) { + case IFM_AUTO: + adapter->hw.mac.autoneg = DO_AUTO_NEG; + adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; + break; + case IFM_1000_LX: + case IFM_1000_SX: + case IFM_1000_T: + adapter->hw.mac.autoneg = DO_AUTO_NEG; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case IFM_100_TX: + adapter->hw.mac.autoneg = FALSE; + adapter->hw.phy.autoneg_advertised = 0; + if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) + adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; + else + adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; + break; + case IFM_10_T: + adapter->hw.mac.autoneg = FALSE; + adapter->hw.phy.autoneg_advertised = 0; + if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) + adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; + else + adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; + break; + default: + device_printf(adapter->dev, "Unsupported media type\n"); + } + + igb_init_locked(adapter); + IGB_CORE_UNLOCK(adapter); + + return (0); +} + + +/********************************************************************* + * + * This routine maps the mbufs to Advanced TX descriptors. + * used by the 82575 adapter. + * + **********************************************************************/ + +static int +igb_xmit(struct tx_ring *txr, struct mbuf **m_headp) +{ + struct adapter *adapter = txr->adapter; + bus_dma_segment_t segs[IGB_MAX_SCATTER]; + bus_dmamap_t map; + struct igb_tx_buffer *tx_buffer, *tx_buffer_mapped; + union e1000_adv_tx_desc *txd = NULL; + struct mbuf *m_head; + u32 olinfo_status = 0, cmd_type_len = 0; + int nsegs, i, j, error, first, last = 0; + u32 hdrlen = 0; + + m_head = *m_headp; + + + /* Set basic descriptor constants */ + cmd_type_len |= E1000_ADVTXD_DTYP_DATA; + cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; + if (m_head->m_flags & M_VLANTAG) + cmd_type_len |= E1000_ADVTXD_DCMD_VLE; + + /* + * Map the packet for DMA. + * + * Capture the first descriptor index, + * this descriptor will have the index + * of the EOP which is the only one that + * now gets a DONE bit writeback. 
+ */ + first = txr->next_avail_desc; + tx_buffer = &txr->tx_buffers[first]; + tx_buffer_mapped = tx_buffer; + map = tx_buffer->map; + + error = bus_dmamap_load_mbuf_sg(txr->txtag, map, + *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); + + if (error == EFBIG) { + struct mbuf *m; + + m = m_defrag(*m_headp, M_DONTWAIT); + if (m == NULL) { + adapter->mbuf_defrag_failed++; + m_freem(*m_headp); + *m_headp = NULL; + return (ENOBUFS); + } + *m_headp = m; + + /* Try it again */ + error = bus_dmamap_load_mbuf_sg(txr->txtag, map, + *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); + + if (error == ENOMEM) { + adapter->no_tx_dma_setup++; + return (error); + } else if (error != 0) { + adapter->no_tx_dma_setup++; + m_freem(*m_headp); + *m_headp = NULL; + return (error); + } + } else if (error == ENOMEM) { + adapter->no_tx_dma_setup++; + return (error); + } else if (error != 0) { + adapter->no_tx_dma_setup++; + m_freem(*m_headp); + *m_headp = NULL; + return (error); + } + + /* Check again to be sure we have enough descriptors */ + if (nsegs > (txr->tx_avail - 2)) { + txr->no_desc_avail++; + bus_dmamap_unload(txr->txtag, map); + return (ENOBUFS); + } + m_head = *m_headp; + + /* + * Set up the context descriptor: + * used when any hardware offload is done. + * This includes CSUM, VLAN, and TSO. It + * will use the first descriptor. + */ + if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { + if (igb_tso_setup(txr, m_head, &hdrlen)) { + cmd_type_len |= E1000_ADVTXD_DCMD_TSE; + olinfo_status |= E1000_TXD_POPTS_IXSM << 8; + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + } else + return (ENXIO); + } else if (igb_tx_ctx_setup(txr, m_head)) + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + + /* Calculate payload length */ + olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) + << E1000_ADVTXD_PAYLEN_SHIFT); + + /* 82575 needs the queue index added */ + if (adapter->hw.mac.type == e1000_82575) + olinfo_status |= txr->me << 4; + + /* Set up our transmit descriptors */ + i = txr->next_avail_desc; + for (j = 0; j < nsegs; j++) { + bus_size_t seg_len; + bus_addr_t seg_addr; + + tx_buffer = &txr->tx_buffers[i]; + txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; + seg_addr = segs[j].ds_addr; + seg_len = segs[j].ds_len; + + txd->read.buffer_addr = htole64(seg_addr); + txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); + txd->read.olinfo_status = htole32(olinfo_status); + last = i; + if (++i == adapter->num_tx_desc) + i = 0; + tx_buffer->m_head = NULL; + tx_buffer->next_eop = -1; + } + + txr->next_avail_desc = i; + txr->tx_avail -= nsegs; + + tx_buffer->m_head = m_head; + tx_buffer_mapped->map = tx_buffer->map; + tx_buffer->map = map; + bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); + + /* + * Last Descriptor of Packet + * needs End Of Packet (EOP) + * and Report Status (RS) + */ + txd->read.cmd_type_len |= + htole32(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS); + /* + * Keep track in the first buffer which + * descriptor will be written back + */ + tx_buffer = &txr->tx_buffers[first]; + tx_buffer->next_eop = last; + txr->watchdog_time = ticks; + + /* + * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 + * that this frame is available to transmit. 
+ */ + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i); + ++txr->tx_packets; + + return (0); + +} + +static void +igb_set_promisc(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + if (adapter->vf_ifp) { + e1000_promisc_set_vf(hw, e1000_promisc_enabled); + return; + } + + reg = E1000_READ_REG(hw, E1000_RCTL); + if (ifp->if_flags & IFF_PROMISC) { + reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, reg); + } else if (ifp->if_flags & IFF_ALLMULTI) { + reg |= E1000_RCTL_MPE; + reg &= ~E1000_RCTL_UPE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); + } +} + +static void +igb_disable_promisc(struct adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 reg; + + if (adapter->vf_ifp) { + e1000_promisc_set_vf(hw, e1000_promisc_disabled); + return; + } + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= (~E1000_RCTL_UPE); + reg &= (~E1000_RCTL_MPE); + E1000_WRITE_REG(hw, E1000_RCTL, reg); +} + + +/********************************************************************* + * Multicast Update + * + * This routine is called whenever multicast address list is updated. + * + **********************************************************************/ + +static void +igb_set_multi(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + struct ifmultiaddr *ifma; + u32 reg_rctl = 0; + u8 *mta; + + int mcnt = 0; + + IOCTL_DEBUGOUT("igb_set_multi: begin"); + + mta = adapter->mta; + bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN * + MAX_NUM_MULTICAST_ADDRESSES); + +#if __FreeBSD_version < 800000 + IF_ADDR_LOCK(ifp); +#else + if_maddr_rlock(ifp); +#endif + TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + + if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) + break; + + bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), + &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); + mcnt++; + } +#if __FreeBSD_version < 800000 + IF_ADDR_UNLOCK(ifp); +#else + if_maddr_runlock(ifp); +#endif + + if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { + reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); + reg_rctl |= E1000_RCTL_MPE; + E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); + } else + e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); +} + + +/********************************************************************* + * Timer routine: + * This routine checks for link status, + * updates statistics, and does the watchdog. + * + **********************************************************************/ + +static void +igb_local_timer(void *arg) +{ + struct adapter *adapter = arg; + device_t dev = adapter->dev; + struct tx_ring *txr = adapter->tx_rings; + + + IGB_CORE_LOCK_ASSERT(adapter); + + igb_update_link_status(adapter); + igb_update_stats_counters(adapter); + + /* + ** If flow control has paused us since last checking + ** it invalidates the watchdog timing, so dont run it. 
+ */ + if (adapter->pause_frames) { + adapter->pause_frames = 0; + goto out; + } + + /* + ** Watchdog: check for time since any descriptor was cleaned + */ + for (int i = 0; i < adapter->num_queues; i++, txr++) + if (txr->queue_status == IGB_QUEUE_HUNG) + goto timeout; +out: + callout_reset(&adapter->timer, hz, igb_local_timer, adapter); +#ifndef DEVICE_POLLING + /* Schedule all queue interrupts - deadlock protection */ + E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask); +#endif + return; + +timeout: + device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); + device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, + E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)), + E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me))); + device_printf(dev,"TX(%d) desc avail = %d," + "Next TX to Clean = %d\n", + txr->me, txr->tx_avail, txr->next_to_clean); + adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + adapter->watchdog_events++; + igb_init_locked(adapter); +} + +static void +igb_update_link_status(struct adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct ifnet *ifp = adapter->ifp; + device_t dev = adapter->dev; + struct tx_ring *txr = adapter->tx_rings; + u32 link_check, thstat, ctrl; + + link_check = thstat = ctrl = 0; + + /* Get the cached link value or read for real */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (hw->mac.get_link_status) { + /* Do the work to read phy */ + e1000_check_for_link(hw); + link_check = !hw->mac.get_link_status; + } else + link_check = TRUE; + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_check = (E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_check = adapter->hw.mac.serdes_has_link; + break; + /* VF device is type_unknown */ + case e1000_media_type_unknown: + e1000_check_for_link(hw); + link_check = !hw->mac.get_link_status; + /* Fall thru */ + default: + break; + } + + /* Check for thermal downshift or shutdown */ + if (hw->mac.type == e1000_i350) { + thstat = E1000_READ_REG(hw, E1000_THSTAT); + ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT); + } + + /* Now we check if a transition has happened */ + if (link_check && (adapter->link_active == 0)) { + e1000_get_speed_and_duplex(&adapter->hw, + &adapter->link_speed, &adapter->link_duplex); + if (bootverbose) + device_printf(dev, "Link is up %d Mbps %s\n", + adapter->link_speed, + ((adapter->link_duplex == FULL_DUPLEX) ? 
+ "Full Duplex" : "Half Duplex")); + adapter->link_active = 1; + ifp->if_baudrate = adapter->link_speed * 1000000; + if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && + (thstat & E1000_THSTAT_LINK_THROTTLE)) + device_printf(dev, "Link: thermal downshift\n"); + /* This can sleep */ + if_link_state_change(ifp, LINK_STATE_UP); + } else if (!link_check && (adapter->link_active == 1)) { + ifp->if_baudrate = adapter->link_speed = 0; + adapter->link_duplex = 0; + if (bootverbose) + device_printf(dev, "Link is Down\n"); + if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) && + (thstat & E1000_THSTAT_PWR_DOWN)) + device_printf(dev, "Link: thermal shutdown\n"); + adapter->link_active = 0; + /* This can sleep */ + if_link_state_change(ifp, LINK_STATE_DOWN); + /* Turn off watchdogs */ + for (int i = 0; i < adapter->num_queues; i++, txr++) + txr->queue_status = IGB_QUEUE_IDLE; + } +} + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC and deallocates TX/RX buffers. + * + **********************************************************************/ + +static void +igb_stop(void *arg) +{ + struct adapter *adapter = arg; + struct ifnet *ifp = adapter->ifp; + struct tx_ring *txr = adapter->tx_rings; + + IGB_CORE_LOCK_ASSERT(adapter); + + INIT_DEBUGOUT("igb_stop: begin"); + + igb_disable_intr(adapter); + + callout_stop(&adapter->timer); + + /* Tell the stack that the interface is no longer active */ + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + + /* Unarm watchdog timer. */ + for (int i = 0; i < adapter->num_queues; i++, txr++) { + IGB_TX_LOCK(txr); + txr->queue_status = IGB_QUEUE_IDLE; + IGB_TX_UNLOCK(txr); + } + + e1000_reset_hw(&adapter->hw); + E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); + + e1000_led_off(&adapter->hw); + e1000_cleanup_led(&adapter->hw); +} + + +/********************************************************************* + * + * Determine hardware revision. + * + **********************************************************************/ +static void +igb_identify_hardware(struct adapter *adapter) +{ + device_t dev = adapter->dev; + + /* Make sure our PCI config space has the necessary stuff set */ + adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); + if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) && + (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) { + INIT_DEBUGOUT("Memory Access and/or Bus Master " + "bits were not set!\n"); + adapter->hw.bus.pci_cmd_word |= + (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); + pci_write_config(dev, PCIR_COMMAND, + adapter->hw.bus.pci_cmd_word, 2); + } + + /* Save off the information about this board */ + adapter->hw.vendor_id = pci_get_vendor(dev); + adapter->hw.device_id = pci_get_device(dev); + adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); + adapter->hw.subsystem_vendor_id = + pci_read_config(dev, PCIR_SUBVEND_0, 2); + adapter->hw.subsystem_device_id = + pci_read_config(dev, PCIR_SUBDEV_0, 2); + + /* Set MAC type early for PCI setup */ + e1000_set_mac_type(&adapter->hw); + + /* Are we a VF device? 
*/ + if ((adapter->hw.mac.type == e1000_vfadapt) || + (adapter->hw.mac.type == e1000_vfadapt_i350)) + adapter->vf_ifp = 1; + else + adapter->vf_ifp = 0; +} + +static int +igb_allocate_pci_resources(struct adapter *adapter) +{ + device_t dev = adapter->dev; + int rid; + + rid = PCIR_BAR(0); + adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); + if (adapter->pci_mem == NULL) { + device_printf(dev, "Unable to allocate bus resource: memory\n"); + return (ENXIO); + } + adapter->osdep.mem_bus_space_tag = + rman_get_bustag(adapter->pci_mem); + adapter->osdep.mem_bus_space_handle = + rman_get_bushandle(adapter->pci_mem); + adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; + + adapter->num_queues = 1; /* Defaults for Legacy or MSI */ + + /* This will setup either MSI/X or MSI */ + adapter->msix = igb_setup_msix(adapter); + adapter->hw.back = &adapter->osdep; + + return (0); +} + +/********************************************************************* + * + * Setup the Legacy or MSI Interrupt handler + * + **********************************************************************/ +static int +igb_allocate_legacy(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct igb_queue *que = adapter->queues; + int error, rid = 0; + + /* Turn off all interrupts */ + E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); + + /* MSI RID is 1 */ + if (adapter->msix == 1) + rid = 1; + + /* We allocate a single interrupt resource */ + adapter->res = bus_alloc_resource_any(dev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); + if (adapter->res == NULL) { + device_printf(dev, "Unable to allocate bus resource: " + "interrupt\n"); + return (ENXIO); + } + + /* + * Try allocating a fast interrupt and the associated deferred + * processing contexts. 
+ */ + TASK_INIT(&que->que_task, 0, igb_handle_que, que); + /* Make tasklet for deferred link handling */ + TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter); + que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT, + taskqueue_thread_enqueue, &que->tq); + taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq", + device_get_nameunit(adapter->dev)); + if ((error = bus_setup_intr(dev, adapter->res, + INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL, + adapter, &adapter->tag)) != 0) { + device_printf(dev, "Failed to register fast interrupt " + "handler: %d\n", error); + taskqueue_free(que->tq); + que->tq = NULL; + return (error); + } + + return (0); +} + + +/********************************************************************* + * + * Setup the MSIX Queue Interrupt handlers: + * + **********************************************************************/ +static int +igb_allocate_msix(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct igb_queue *que = adapter->queues; + int error, rid, vector = 0; + + + for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { + rid = vector +1; + que->res = bus_alloc_resource_any(dev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); + if (que->res == NULL) { + device_printf(dev, + "Unable to allocate bus resource: " + "MSIX Queue Interrupt\n"); + return (ENXIO); + } + error = bus_setup_intr(dev, que->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + igb_msix_que, que, &que->tag); + if (error) { + que->res = NULL; + device_printf(dev, "Failed to register Queue handler"); + return (error); + } +#if __FreeBSD_version >= 800504 + bus_describe_intr(dev, que->res, que->tag, "que %d", i); +#endif + que->msix = vector; + if (adapter->hw.mac.type == e1000_82575) + que->eims = E1000_EICR_TX_QUEUE0 << i; + else + que->eims = 1 << vector; + /* + ** Bind the msix vector, and thus the + ** rings to the corresponding cpu. 
+ */ + if (adapter->num_queues > 1) + bus_bind_intr(dev, que->res, i); + /* Make tasklet for deferred handling */ + TASK_INIT(&que->que_task, 0, igb_handle_que, que); + que->tq = taskqueue_create_fast("igb_que", M_NOWAIT, + taskqueue_thread_enqueue, &que->tq); + taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", + device_get_nameunit(adapter->dev)); + } + + /* And Link */ + rid = vector + 1; + adapter->res = bus_alloc_resource_any(dev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); + if (adapter->res == NULL) { + device_printf(dev, + "Unable to allocate bus resource: " + "MSIX Link Interrupt\n"); + return (ENXIO); + } + if ((error = bus_setup_intr(dev, adapter->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + igb_msix_link, adapter, &adapter->tag)) != 0) { + device_printf(dev, "Failed to register Link handler"); + return (error); + } +#if __FreeBSD_version >= 800504 + bus_describe_intr(dev, adapter->res, adapter->tag, "link"); +#endif + adapter->linkvec = vector; + + return (0); +} + + +static void +igb_configure_queues(struct adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igb_queue *que; + u32 tmp, ivar = 0, newitr = 0; + + /* First turn on RSS capability */ + if (adapter->hw.mac.type != e1000_82575) + E1000_WRITE_REG(hw, E1000_GPIE, + E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME | + E1000_GPIE_PBA | E1000_GPIE_NSICR); + + /* Turn on MSIX */ + switch (adapter->hw.mac.type) { + case e1000_82580: + case e1000_i350: + case e1000_vfadapt: + case e1000_vfadapt_i350: + /* RX entries */ + for (int i = 0; i < adapter->num_queues; i++) { + u32 index = i >> 1; + ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); + que = &adapter->queues[i]; + if (i & 1) { + ivar &= 0xFF00FFFF; + ivar |= (que->msix | E1000_IVAR_VALID) << 16; + } else { + ivar &= 0xFFFFFF00; + ivar |= que->msix | E1000_IVAR_VALID; + } + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); + } + /* TX entries */ + for (int i = 0; i < adapter->num_queues; i++) { + u32 index = i >> 1; + ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); + que = &adapter->queues[i]; + if (i & 1) { + ivar &= 0x00FFFFFF; + ivar |= (que->msix | E1000_IVAR_VALID) << 24; + } else { + ivar &= 0xFFFF00FF; + ivar |= (que->msix | E1000_IVAR_VALID) << 8; + } + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); + adapter->que_mask |= que->eims; + } + + /* And for the link interrupt */ + ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; + adapter->link_mask = 1 << adapter->linkvec; + E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); + break; + case e1000_82576: + /* RX entries */ + for (int i = 0; i < adapter->num_queues; i++) { + u32 index = i & 0x7; /* Each IVAR has two entries */ + ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); + que = &adapter->queues[i]; + if (i < 8) { + ivar &= 0xFFFFFF00; + ivar |= que->msix | E1000_IVAR_VALID; + } else { + ivar &= 0xFF00FFFF; + ivar |= (que->msix | E1000_IVAR_VALID) << 16; + } + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); + adapter->que_mask |= que->eims; + } + /* TX entries */ + for (int i = 0; i < adapter->num_queues; i++) { + u32 index = i & 0x7; /* Each IVAR has two entries */ + ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); + que = &adapter->queues[i]; + if (i < 8) { + ivar &= 0xFFFF00FF; + ivar |= (que->msix | E1000_IVAR_VALID) << 8; + } else { + ivar &= 0x00FFFFFF; + ivar |= (que->msix | E1000_IVAR_VALID) << 24; + } + E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); + adapter->que_mask |= que->eims; + } + + /* And for the link interrupt */ + ivar = (adapter->linkvec | 
E1000_IVAR_VALID) << 8; + adapter->link_mask = 1 << adapter->linkvec; + E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); + break; + + case e1000_82575: + /* enable MSI-X support*/ + tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); + tmp |= E1000_CTRL_EXT_PBA_CLR; + /* Auto-Mask interrupts upon ICR read. */ + tmp |= E1000_CTRL_EXT_EIAME; + tmp |= E1000_CTRL_EXT_IRCA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); + + /* Queues */ + for (int i = 0; i < adapter->num_queues; i++) { + que = &adapter->queues[i]; + tmp = E1000_EICR_RX_QUEUE0 << i; + tmp |= E1000_EICR_TX_QUEUE0 << i; + que->eims = tmp; + E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), + i, que->eims); + adapter->que_mask |= que->eims; + } + + /* Link */ + E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec), + E1000_EIMS_OTHER); + adapter->link_mask |= E1000_EIMS_OTHER; + default: + break; + } + + /* Set the starting interrupt rate */ + if (igb_max_interrupt_rate > 0) + newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC; + + if (hw->mac.type == e1000_82575) + newitr |= newitr << 16; + else + newitr |= E1000_EITR_CNT_IGNR; + + for (int i = 0; i < adapter->num_queues; i++) { + que = &adapter->queues[i]; + E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr); + } + + return; +} + + +static void +igb_free_pci_resources(struct adapter *adapter) +{ + struct igb_queue *que = adapter->queues; + device_t dev = adapter->dev; + int rid; + + /* + ** There is a slight possibility of a failure mode + ** in attach that will result in entering this function + ** before interrupt resources have been initialized, and + ** in that case we do not want to execute the loops below + ** We can detect this reliably by the state of the adapter + ** res pointer. + */ + if (adapter->res == NULL) + goto mem; + + /* + * First release all the interrupt resources: + */ + for (int i = 0; i < adapter->num_queues; i++, que++) { + rid = que->msix + 1; + if (que->tag != NULL) { + bus_teardown_intr(dev, que->res, que->tag); + que->tag = NULL; + } + if (que->res != NULL) + bus_release_resource(dev, + SYS_RES_IRQ, rid, que->res); + } + + /* Clean the Legacy or Link interrupt last */ + if (adapter->linkvec) /* we are doing MSIX */ + rid = adapter->linkvec + 1; + else + (adapter->msix != 0) ? (rid = 1):(rid = 0); + + if (adapter->tag != NULL) { + bus_teardown_intr(dev, adapter->res, adapter->tag); + adapter->tag = NULL; + } + if (adapter->res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); + +mem: + if (adapter->msix) + pci_release_msi(dev); + + if (adapter->msix_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem); + + if (adapter->pci_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(0), adapter->pci_mem); + +} + +/* + * Setup Either MSI/X or MSI + */ +static int +igb_setup_msix(struct adapter *adapter) +{ + device_t dev = adapter->dev; + int rid, want, queues, msgs; + + /* tuneable override */ + if (igb_enable_msix == 0) + goto msi; + + /* First try MSI/X */ + rid = PCIR_BAR(IGB_MSIX_BAR); + adapter->msix_mem = bus_alloc_resource_any(dev, + SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (!adapter->msix_mem) { + /* May not be enabled */ + device_printf(adapter->dev, + "Unable to map MSIX table \n"); + goto msi; + } + + msgs = pci_msix_count(dev); + if (msgs == 0) { /* system has msix disabled */ + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem); + adapter->msix_mem = NULL; + goto msi; + } + + /* Figure out a reasonable auto config value */ + queues = (mp_ncpus > (msgs-1)) ? 
+            (msgs-1) : mp_ncpus;
+
+        /* Manual override */
+        if (igb_num_queues != 0)
+                queues = igb_num_queues;
+        if (queues > 8)  /* max queues */
+                queues = 8;
+
+        /* Can have max of 4 queues on 82575 */
+        if ((adapter->hw.mac.type == e1000_82575) && (queues > 4))
+                queues = 4;
+
+        /* Limit the VF devices to one queue */
+        if (adapter->vf_ifp)
+                queues = 1;
+
+        /*
+        ** One vector (RX/TX pair) per queue
+        ** plus an additional for Link interrupt
+        */
+        want = queues + 1;
+        if (msgs >= want)
+                msgs = want;
+        else {
+                device_printf(adapter->dev,
+                    "MSIX Configuration Problem, "
+                    "%d vectors configured, but %d queues wanted!\n",
+                    msgs, want);
+                return (ENXIO);
+        }
+        if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
+                device_printf(adapter->dev,
+                    "Using MSIX interrupts with %d vectors\n", msgs);
+                adapter->num_queues = queues;
+                return (msgs);
+        }
+msi:
+        msgs = pci_msi_count(dev);
+        if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
+                device_printf(adapter->dev, "Using MSI interrupt\n");
+        return (msgs);
+}
+
+/*********************************************************************
+ *
+ *  Set up a fresh starting state
+ *
+ **********************************************************************/
+static void
+igb_reset(struct adapter *adapter)
+{
+        device_t dev = adapter->dev;
+        struct e1000_hw *hw = &adapter->hw;
+        struct e1000_fc_info *fc = &hw->fc;
+        struct ifnet *ifp = adapter->ifp;
+        u32 pba = 0;
+        u16 hwm;
+
+        INIT_DEBUGOUT("igb_reset: begin");
+
+        /* Let the firmware know the OS is in control */
+        igb_get_hw_control(adapter);
+
+        /*
+         * Packet Buffer Allocation (PBA)
+         * Writing PBA sets the receive portion of the buffer
+         * the remainder is used for the transmit buffer.
+         */
+        switch (hw->mac.type) {
+        case e1000_82575:
+                pba = E1000_PBA_32K;
+                break;
+        case e1000_82576:
+        case e1000_vfadapt:
+                pba = E1000_READ_REG(hw, E1000_RXPBS);
+                pba &= E1000_RXPBS_SIZE_MASK_82576;
+                break;
+        case e1000_82580:
+        case e1000_i350:
+        case e1000_vfadapt_i350:
+                pba = E1000_READ_REG(hw, E1000_RXPBS);
+                pba = e1000_rxpbs_adjust_82580(pba);
+                break;
+        default:
+                break;
+        }
+
+        /* Special needs in case of Jumbo frames */
+        if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
+                u32 tx_space, min_tx, min_rx;
+                pba = E1000_READ_REG(hw, E1000_PBA);
+                tx_space = pba >> 16;
+                pba &= 0xffff;
+                min_tx = (adapter->max_frame_size +
+                    sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
+                min_tx = roundup2(min_tx, 1024);
+                min_tx >>= 10;
+                min_rx = adapter->max_frame_size;
+                min_rx = roundup2(min_rx, 1024);
+                min_rx >>= 10;
+                if (tx_space < min_tx &&
+                    ((min_tx - tx_space) < pba)) {
+                        pba = pba - (min_tx - tx_space);
+                        /*
+                         * if short on rx space, rx wins
+                         * and must trump tx adjustment
+                         */
+                        if (pba < min_rx)
+                                pba = min_rx;
+                }
+                E1000_WRITE_REG(hw, E1000_PBA, pba);
+        }
+
+        INIT_DEBUGOUT1("igb_init: pba=%dK", pba);
+
+        /*
+         * These parameters control the automatic generation (Tx) and
+         * response (Rx) to Ethernet PAUSE frames.
+         * - High water mark should allow for at least two frames to be
+         *   received after sending an XOFF.
+         * - Low water mark works best when it is very near the high water mark.
+         *   This allows the receiver to restart by sending XON when it has
+         *   drained a bit.
+ */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - 2 * adapter->max_frame_size)); + + if (hw->mac.type < e1000_82576) { + fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + } else { + fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ + fc->low_water = fc->high_water - 16; + } + + fc->pause_time = IGB_FC_PAUSE_TIME; + fc->send_xon = TRUE; + + /* Issue a global reset */ + e1000_reset_hw(hw); + E1000_WRITE_REG(hw, E1000_WUC, 0); + + if (e1000_init_hw(hw) < 0) + device_printf(dev, "Hardware Initialization Failed\n"); + + /* Setup DMA Coalescing */ + if ((hw->mac.type == e1000_i350) && + (adapter->dma_coalesce == TRUE)) { + u32 reg; + + hwm = (pba - 4) << 10; + reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT) + & E1000_DMACR_DMACTHR_MASK); + + /* transition to L0x or L1 if available..*/ + reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK); + + /* timer = +-1000 usec in 32usec intervals */ + reg |= (1000 >> 5); + E1000_WRITE_REG(hw, E1000_DMACR, reg); + + /* No lower threshold */ + E1000_WRITE_REG(hw, E1000_DMCRTRH, 0); + + /* set hwm to PBA - 2 * max frame size */ + E1000_WRITE_REG(hw, E1000_FCRTC, hwm); + + /* Set the interval before transition */ + reg = E1000_READ_REG(hw, E1000_DMCTLX); + reg |= 0x800000FF; /* 255 usec */ + E1000_WRITE_REG(hw, E1000_DMCTLX, reg); + + /* free space in tx packet buffer to wake from DMA coal */ + E1000_WRITE_REG(hw, E1000_DMCTXTH, + (20480 - (2 * adapter->max_frame_size)) >> 6); + + /* make low power state decision controlled by DMA coal */ + reg = E1000_READ_REG(hw, E1000_PCIEMISC); + E1000_WRITE_REG(hw, E1000_PCIEMISC, + reg | E1000_PCIEMISC_LX_DECISION); + device_printf(dev, "DMA Coalescing enabled\n"); + } + + E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); + e1000_get_phy_info(hw); + e1000_check_for_link(hw); + return; +} + +/********************************************************************* + * + * Setup networking device structure and register an interface. + * + **********************************************************************/ +static int +igb_setup_interface(device_t dev, struct adapter *adapter) +{ + struct ifnet *ifp; + + INIT_DEBUGOUT("igb_setup_interface: begin"); + + ifp = adapter->ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) { + device_printf(dev, "can not allocate ifnet structure\n"); + return (-1); + } + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_mtu = ETHERMTU; + ifp->if_init = igb_init; + ifp->if_softc = adapter; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_ioctl = igb_ioctl; + ifp->if_start = igb_start; +#if __FreeBSD_version >= 800000 + ifp->if_transmit = igb_mq_start; + ifp->if_qflush = igb_qflush; +#endif + IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); + ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; + IFQ_SET_READY(&ifp->if_snd); + + ether_ifattach(ifp, adapter->hw.mac.addr); + + ifp->if_capabilities = ifp->if_capenable = 0; + + ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; + ifp->if_capabilities |= IFCAP_TSO4; + ifp->if_capabilities |= IFCAP_JUMBO_MTU; + ifp->if_capenable = ifp->if_capabilities; + + /* Don't enable LRO by default */ + ifp->if_capabilities |= IFCAP_LRO; + +#ifdef DEVICE_POLLING + ifp->if_capabilities |= IFCAP_POLLING; +#endif + + /* + * Tell the upper layer(s) we + * support full VLAN capability. 
+ */ + ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); + ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; + ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; + + /* + ** Dont turn this on by default, if vlans are + ** created on another pseudo device (eg. lagg) + ** then vlan events are not passed thru, breaking + ** operation, but with HW FILTER off it works. If + ** using vlans directly on the em driver you can + ** enable this and get full hardware tag filtering. + */ + ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; + + /* + * Specify the media types supported by this adapter and register + * callbacks to update media and link information + */ + ifmedia_init(&adapter->media, IFM_IMASK, + igb_media_change, igb_media_status); + if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || + (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { + ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, + 0, NULL); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); + } else { + ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, + 0, NULL); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, + 0, NULL); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, + 0, NULL); + if (adapter->hw.phy.type != e1000_phy_ife) { + ifmedia_add(&adapter->media, + IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); + ifmedia_add(&adapter->media, + IFM_ETHER | IFM_1000_T, 0, NULL); + } + } + ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); + return (0); +} + + +/* + * Manage DMA'able memory. + */ +static void +igb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) +{ + if (error) + return; + *(bus_addr_t *) arg = segs[0].ds_addr; +} + +static int +igb_dma_malloc(struct adapter *adapter, bus_size_t size, + struct igb_dma_alloc *dma, int mapflags) +{ + int error; + + error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ + IGB_DBA_ALIGN, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + size, /* maxsize */ + 1, /* nsegments */ + size, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockarg */ + &dma->dma_tag); + if (error) { + device_printf(adapter->dev, + "%s: bus_dma_tag_create failed: %d\n", + __func__, error); + goto fail_0; + } + + error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, + BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); + if (error) { + device_printf(adapter->dev, + "%s: bus_dmamem_alloc(%ju) failed: %d\n", + __func__, (uintmax_t)size, error); + goto fail_2; + } + + dma->dma_paddr = 0; + error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, + size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); + if (error || dma->dma_paddr == 0) { + device_printf(adapter->dev, + "%s: bus_dmamap_load failed: %d\n", + __func__, error); + goto fail_3; + } + + return (0); + +fail_3: + bus_dmamap_unload(dma->dma_tag, dma->dma_map); +fail_2: + bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); + bus_dma_tag_destroy(dma->dma_tag); +fail_0: + dma->dma_map = NULL; + dma->dma_tag = NULL; + + return (error); +} + +static void +igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma) +{ + if (dma->dma_tag == NULL) + return; + if (dma->dma_map != NULL) { + bus_dmamap_sync(dma->dma_tag, dma->dma_map, + 
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(dma->dma_tag, dma->dma_map); + bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); + dma->dma_map = NULL; + } + bus_dma_tag_destroy(dma->dma_tag); + dma->dma_tag = NULL; +} + + +/********************************************************************* + * + * Allocate memory for the transmit and receive rings, and then + * the descriptors associated with each, called only once at attach. + * + **********************************************************************/ +static int +igb_allocate_queues(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct igb_queue *que = NULL; + struct tx_ring *txr = NULL; + struct rx_ring *rxr = NULL; + int rsize, tsize, error = E1000_SUCCESS; + int txconf = 0, rxconf = 0; + + /* First allocate the top level queue structs */ + if (!(adapter->queues = + (struct igb_queue *) malloc(sizeof(struct igb_queue) * + adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate queue memory\n"); + error = ENOMEM; + goto fail; + } + + /* Next allocate the TX ring struct memory */ + if (!(adapter->tx_rings = + (struct tx_ring *) malloc(sizeof(struct tx_ring) * + adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate TX ring memory\n"); + error = ENOMEM; + goto tx_fail; + } + + /* Now allocate the RX */ + if (!(adapter->rx_rings = + (struct rx_ring *) malloc(sizeof(struct rx_ring) * + adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate RX ring memory\n"); + error = ENOMEM; + goto rx_fail; + } + + tsize = roundup2(adapter->num_tx_desc * + sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN); + /* + * Now set up the TX queues, txconf is needed to handle the + * possibility that things fail midcourse and we need to + * undo memory gracefully + */ + for (int i = 0; i < adapter->num_queues; i++, txconf++) { + /* Set up some basics */ + txr = &adapter->tx_rings[i]; + txr->adapter = adapter; + txr->me = i; + + /* Initialize the TX lock */ + snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", + device_get_nameunit(dev), txr->me); + mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); + + if (igb_dma_malloc(adapter, tsize, + &txr->txdma, BUS_DMA_NOWAIT)) { + device_printf(dev, + "Unable to allocate TX Descriptor memory\n"); + error = ENOMEM; + goto err_tx_desc; + } + txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr; + bzero((void *)txr->tx_base, tsize); + + /* Now allocate transmit buffers for the ring */ + if (igb_allocate_transmit_buffers(txr)) { + device_printf(dev, + "Critical Failure setting up transmit buffers\n"); + error = ENOMEM; + goto err_tx_desc; + } +#if __FreeBSD_version >= 800000 + /* Allocate a buf ring */ + txr->br = buf_ring_alloc(IGB_BR_SIZE, M_DEVBUF, + M_WAITOK, &txr->tx_mtx); +#endif + } + + /* + * Next the RX queues... 
+ */
+ rsize = roundup2(adapter->num_rx_desc *
+ sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
+ for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
+ rxr = &adapter->rx_rings[i];
+ rxr->adapter = adapter;
+ rxr->me = i;
+
+ /* Initialize the RX lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), rxr->me);
+ mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (igb_dma_malloc(adapter, rsize,
+ &rxr->rxdma, BUS_DMA_NOWAIT)) {
+ device_printf(dev,
+ "Unable to allocate RxDescriptor memory\n");
+ error = ENOMEM;
+ goto err_rx_desc;
+ }
+ rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr;
+ bzero((void *)rxr->rx_base, rsize);
+
+ /* Allocate receive buffers for the ring */
+ if (igb_allocate_receive_buffers(rxr)) {
+ device_printf(dev,
+ "Critical Failure setting up receive buffers\n");
+ error = ENOMEM;
+ goto err_rx_desc;
+ }
+ }
+
+ /*
+ ** Finally set up the queue holding structs
+ */
+ for (int i = 0; i < adapter->num_queues; i++) {
+ que = &adapter->queues[i];
+ que->adapter = adapter;
+ que->txr = &adapter->tx_rings[i];
+ que->rxr = &adapter->rx_rings[i];
+ }
+
+ return (0);
+
+err_rx_desc:
+ for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
+ igb_dma_free(adapter, &rxr->rxdma);
+err_tx_desc:
+ for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
+ igb_dma_free(adapter, &txr->txdma);
+ free(adapter->rx_rings, M_DEVBUF);
+rx_fail:
+#if __FreeBSD_version >= 800000
+ buf_ring_free(txr->br, M_DEVBUF);
+#endif
+ free(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+ free(adapter->queues, M_DEVBUF);
+fail:
+ return (error);
+}
+
+/*********************************************************************
+ *
+ * Allocate memory for tx_buffer structures. The tx_buffer stores all
+ * the information needed to transmit a packet on the wire. This is
+ * called only once at attach, setup is done every reset.
+ *
+ **********************************************************************/
+static int
+igb_allocate_transmit_buffers(struct tx_ring *txr)
+{
+ struct adapter *adapter = txr->adapter;
+ device_t dev = adapter->dev;
+ struct igb_tx_buffer *txbuf;
+ int error, i;
+
+ /*
+ * Setup DMA descriptor areas.
+ */
+ if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ IGB_TSO_SIZE, /* maxsize */
+ IGB_MAX_SCATTER, /* nsegments */
+ PAGE_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &txr->txtag))) {
+ device_printf(dev, "Unable to allocate TX DMA tag\n");
+ goto fail;
+ }
+
+ if (!(txr->tx_buffers =
+ (struct igb_tx_buffer *) malloc(sizeof(struct igb_tx_buffer) *
+ adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate tx_buffer memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ /* Create the descriptor buffer dma maps */
+ txbuf = txr->tx_buffers;
+ for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+ error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
+ if (error != 0) {
+ device_printf(dev, "Unable to create TX DMA map\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ /* We free all, it handles case where we are in the middle */
+ igb_free_transmit_structures(adapter);
+ return (error);
+}
+
+/*********************************************************************
+ *
+ * Initialize a transmit ring.
+ * + **********************************************************************/ +static void +igb_setup_transmit_ring(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct igb_tx_buffer *txbuf; + int i; + + /* Clear the old descriptor contents */ + IGB_TX_LOCK(txr); + bzero((void *)txr->tx_base, + (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc); + /* Reset indices */ + txr->next_avail_desc = 0; + txr->next_to_clean = 0; + + /* Free any existing tx buffers. */ + txbuf = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { + if (txbuf->m_head != NULL) { + bus_dmamap_sync(txr->txtag, txbuf->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, txbuf->map); + m_freem(txbuf->m_head); + txbuf->m_head = NULL; + } + /* clear the watch index */ + txbuf->next_eop = -1; + } + + /* Set number of descriptors available */ + txr->tx_avail = adapter->num_tx_desc; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + IGB_TX_UNLOCK(txr); +} + +/********************************************************************* + * + * Initialize all transmit rings. + * + **********************************************************************/ +static void +igb_setup_transmit_structures(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + + for (int i = 0; i < adapter->num_queues; i++, txr++) + igb_setup_transmit_ring(txr); + + return; +} + +/********************************************************************* + * + * Enable transmit unit. + * + **********************************************************************/ +static void +igb_initialize_transmit_units(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, txdctl; + + INIT_DEBUGOUT("igb_initialize_transmit_units: begin"); + tctl = txdctl = 0; + + /* Setup the Tx Descriptor Rings */ + for (int i = 0; i < adapter->num_queues; i++, txr++) { + u64 bus_addr = txr->txdma.dma_paddr; + + E1000_WRITE_REG(hw, E1000_TDLEN(i), + adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); + E1000_WRITE_REG(hw, E1000_TDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_TDBAL(i), + (uint32_t)bus_addr); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + E1000_WRITE_REG(hw, E1000_TDT(i), 0); + E1000_WRITE_REG(hw, E1000_TDH(i), 0); + + HW_DEBUGOUT2("Base = %x, Length = %x\n", + E1000_READ_REG(hw, E1000_TDBAL(i)), + E1000_READ_REG(hw, E1000_TDLEN(i))); + + txr->queue_status = IGB_QUEUE_IDLE; + + txdctl |= IGB_TX_PTHRESH; + txdctl |= IGB_TX_HTHRESH << 8; + txdctl |= IGB_TX_WTHRESH << 16; + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl); + } + + if (adapter->vf_ifp) + return; + + e1000_config_collision_dist(hw); + + /* Program the Transmit Control Register */ + tctl = E1000_READ_REG(hw, E1000_TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); + + /* This write will effectively turn on the transmit unit. */ + E1000_WRITE_REG(hw, E1000_TCTL, tctl); +} + +/********************************************************************* + * + * Free all transmit rings. 
+ * + **********************************************************************/ +static void +igb_free_transmit_structures(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + IGB_TX_LOCK(txr); + igb_free_transmit_buffers(txr); + igb_dma_free(adapter, &txr->txdma); + IGB_TX_UNLOCK(txr); + IGB_TX_LOCK_DESTROY(txr); + } + free(adapter->tx_rings, M_DEVBUF); +} + +/********************************************************************* + * + * Free transmit ring related data structures. + * + **********************************************************************/ +static void +igb_free_transmit_buffers(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct igb_tx_buffer *tx_buffer; + int i; + + INIT_DEBUGOUT("free_transmit_ring: begin"); + + if (txr->tx_buffers == NULL) + return; + + tx_buffer = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { + if (tx_buffer->m_head != NULL) { + bus_dmamap_sync(txr->txtag, tx_buffer->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + m_freem(tx_buffer->m_head); + tx_buffer->m_head = NULL; + if (tx_buffer->map != NULL) { + bus_dmamap_destroy(txr->txtag, + tx_buffer->map); + tx_buffer->map = NULL; + } + } else if (tx_buffer->map != NULL) { + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + bus_dmamap_destroy(txr->txtag, + tx_buffer->map); + tx_buffer->map = NULL; + } + } +#if __FreeBSD_version >= 800000 + if (txr->br != NULL) + buf_ring_free(txr->br, M_DEVBUF); +#endif + if (txr->tx_buffers != NULL) { + free(txr->tx_buffers, M_DEVBUF); + txr->tx_buffers = NULL; + } + if (txr->txtag != NULL) { + bus_dma_tag_destroy(txr->txtag); + txr->txtag = NULL; + } + return; +} + +/********************************************************************** + * + * Setup work for hardware segmentation offload (TSO) + * + **********************************************************************/ +static boolean_t +igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen) +{ + struct adapter *adapter = txr->adapter; + struct e1000_adv_tx_context_desc *TXD; + struct igb_tx_buffer *tx_buffer; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + u32 mss_l4len_idx = 0; + u16 vtag = 0; + int ctxd, ehdrlen, ip_hlen, tcp_hlen; + struct ether_vlan_header *eh; + struct ip *ip; + struct tcphdr *th; + + + /* + * Determine where frame payload starts. + * Jump over vlan headers if already present + */ + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + else + ehdrlen = ETHER_HDR_LEN; + + /* Ensure we have at least the IP+TCP header in the first mbuf. 
*/ + if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr)) + return FALSE; + + /* Only supports IPV4 for now */ + ctxd = txr->next_avail_desc; + tx_buffer = &txr->tx_buffers[ctxd]; + TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; + + ip = (struct ip *)(mp->m_data + ehdrlen); + if (ip->ip_p != IPPROTO_TCP) + return FALSE; /* 0 */ + ip->ip_sum = 0; + ip_hlen = ip->ip_hl << 2; + th = (struct tcphdr *)((caddr_t)ip + ip_hlen); + th->th_sum = in_pseudo(ip->ip_src.s_addr, + ip->ip_dst.s_addr, htons(IPPROTO_TCP)); + tcp_hlen = th->th_off << 2; + /* + * Calculate header length, this is used + * in the transmit desc in igb_xmit + */ + *hdrlen = ehdrlen + ip_hlen + tcp_hlen; + + /* VLAN MACLEN IPLEN */ + if (mp->m_flags & M_VLANTAG) { + vtag = htole16(mp->m_pkthdr.ether_vtag); + vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); + } + + vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT); + vlan_macip_lens |= ip_hlen; + TXD->vlan_macip_lens |= htole32(vlan_macip_lens); + + /* ADV DTYPE TUCMD */ + type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; + TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); + + /* MSS L4LEN IDX */ + mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT); + /* 82575 needs the queue index added */ + if (adapter->hw.mac.type == e1000_82575) + mss_l4len_idx |= txr->me << 4; + TXD->mss_l4len_idx = htole32(mss_l4len_idx); + + TXD->seqnum_seed = htole32(0); + tx_buffer->m_head = NULL; + tx_buffer->next_eop = -1; + + if (++ctxd == adapter->num_tx_desc) + ctxd = 0; + + txr->tx_avail--; + txr->next_avail_desc = ctxd; + return TRUE; +} + + +/********************************************************************* + * + * Context Descriptor setup for VLAN or CSUM + * + **********************************************************************/ + +static bool +igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp) +{ + struct adapter *adapter = txr->adapter; + struct e1000_adv_tx_context_desc *TXD; + struct igb_tx_buffer *tx_buffer; + u32 vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx; + struct ether_vlan_header *eh; + struct ip *ip = NULL; + struct ip6_hdr *ip6; + int ehdrlen, ctxd, ip_hlen = 0; + u16 etype, vtag = 0; + u8 ipproto = 0; + bool offload = TRUE; + + if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) + offload = FALSE; + + vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0; + ctxd = txr->next_avail_desc; + tx_buffer = &txr->tx_buffers[ctxd]; + TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd]; + + /* + ** In advanced descriptors the vlan tag must + ** be placed into the context descriptor, thus + ** we need to be here just for that setup. + */ + if (mp->m_flags & M_VLANTAG) { + vtag = htole16(mp->m_pkthdr.ether_vtag); + vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT); + } else if (offload == FALSE) + return FALSE; + + /* + * Determine where frame payload starts. + * Jump over vlan headers if already present, + * helpful for QinQ too. 
+ */ + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { + etype = ntohs(eh->evl_proto); + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + } else { + etype = ntohs(eh->evl_encap_proto); + ehdrlen = ETHER_HDR_LEN; + } + + /* Set the ether header length */ + vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT; + + switch (etype) { + case ETHERTYPE_IP: + ip = (struct ip *)(mp->m_data + ehdrlen); + ip_hlen = ip->ip_hl << 2; + if (mp->m_len < ehdrlen + ip_hlen) { + offload = FALSE; + break; + } + ipproto = ip->ip_p; + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4; + break; + case ETHERTYPE_IPV6: + ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); + ip_hlen = sizeof(struct ip6_hdr); + ipproto = ip6->ip6_nxt; + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6; + break; + default: + offload = FALSE; + break; + } + + vlan_macip_lens |= ip_hlen; + type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + switch (ipproto) { + case IPPROTO_TCP: + if (mp->m_pkthdr.csum_flags & CSUM_TCP) + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + case IPPROTO_UDP: + if (mp->m_pkthdr.csum_flags & CSUM_UDP) + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP; + break; +#if __FreeBSD_version >= 800000 + case IPPROTO_SCTP: + if (mp->m_pkthdr.csum_flags & CSUM_SCTP) + type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP; + break; +#endif + default: + offload = FALSE; + break; + } + + /* 82575 needs the queue index added */ + if (adapter->hw.mac.type == e1000_82575) + mss_l4len_idx = txr->me << 4; + + /* Now copy bits into descriptor */ + TXD->vlan_macip_lens |= htole32(vlan_macip_lens); + TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); + TXD->seqnum_seed = htole32(0); + TXD->mss_l4len_idx = htole32(mss_l4len_idx); + + tx_buffer->m_head = NULL; + tx_buffer->next_eop = -1; + + /* We've consumed the first desc, adjust counters */ + if (++ctxd == adapter->num_tx_desc) + ctxd = 0; + txr->next_avail_desc = ctxd; + --txr->tx_avail; + + return (offload); +} + + +/********************************************************************** + * + * Examine each tx_buffer in the used queue. If the hardware is done + * processing the packet then free associated resources. The + * tx_buffer is put back on the free queue. + * + * TRUE return means there's work in the ring to clean, FALSE its empty. + **********************************************************************/ +static bool +igb_txeof(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + int first, last, done, processed; + struct igb_tx_buffer *tx_buffer; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct ifnet *ifp = adapter->ifp; + + IGB_TX_LOCK_ASSERT(txr); + + if (txr->tx_avail == adapter->num_tx_desc) { + txr->queue_status = IGB_QUEUE_IDLE; + return FALSE; + } + + processed = 0; + first = txr->next_to_clean; + tx_desc = &txr->tx_base[first]; + tx_buffer = &txr->tx_buffers[first]; + last = tx_buffer->next_eop; + eop_desc = &txr->tx_base[last]; + + /* + * What this does is get the index of the + * first descriptor AFTER the EOP of the + * first packet, that way we can do the + * simple comparison on the inner while loop. 
+ */ + if (++last == adapter->num_tx_desc) + last = 0; + done = last; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) { + /* We clean the range of the packet */ + while (first != done) { + tx_desc->upper.data = 0; + tx_desc->lower.data = 0; + tx_desc->buffer_addr = 0; + ++txr->tx_avail; + ++processed; + + if (tx_buffer->m_head) { + txr->bytes += + tx_buffer->m_head->m_pkthdr.len; + bus_dmamap_sync(txr->txtag, + tx_buffer->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + + m_freem(tx_buffer->m_head); + tx_buffer->m_head = NULL; + } + tx_buffer->next_eop = -1; + txr->watchdog_time = ticks; + + if (++first == adapter->num_tx_desc) + first = 0; + + tx_buffer = &txr->tx_buffers[first]; + tx_desc = &txr->tx_base[first]; + } + ++txr->packets; + ++ifp->if_opackets; + /* See if we can continue to the next packet */ + last = tx_buffer->next_eop; + if (last != -1) { + eop_desc = &txr->tx_base[last]; + /* Get new done point */ + if (++last == adapter->num_tx_desc) last = 0; + done = last; + } else + break; + } + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + txr->next_to_clean = first; + + /* + ** Watchdog calculation, we know there's + ** work outstanding or the first return + ** would have been taken, so none processed + ** for too long indicates a hang. + */ + if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG)) + txr->queue_status = IGB_QUEUE_HUNG; + + /* + * If we have a minimum free, clear IFF_DRV_OACTIVE + * to tell the stack that it is OK to send packets. + */ + if (txr->tx_avail > IGB_TX_CLEANUP_THRESHOLD) { + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + /* All clean, turn off the watchdog */ + if (txr->tx_avail == adapter->num_tx_desc) { + txr->queue_status = IGB_QUEUE_IDLE; + return (FALSE); + } + } + return (TRUE); +} + +/********************************************************************* + * + * Refresh mbuf buffers for RX descriptor rings + * - now keeps its own state so discards due to resource + * exhaustion are unnecessary, if an mbuf cannot be obtained + * it just returns, keeping its placeholder, thus it can simply + * be recalled to try again. + * + **********************************************************************/ +static void +igb_refresh_mbufs(struct rx_ring *rxr, int limit) +{ + struct adapter *adapter = rxr->adapter; + bus_dma_segment_t hseg[1]; + bus_dma_segment_t pseg[1]; + struct igb_rx_buf *rxbuf; + struct mbuf *mh, *mp; + int i, j, nsegs, error; + bool refreshed = FALSE; + + i = j = rxr->next_to_refresh; + /* + ** Get one descriptor beyond + ** our work mark to control + ** the loop. 
+ */ + if (++j == adapter->num_rx_desc) + j = 0; + + while (j != limit) { + rxbuf = &rxr->rx_buffers[i]; + /* No hdr mbuf used with header split off */ + if (rxr->hdr_split == FALSE) + goto no_split; + if (rxbuf->m_head == NULL) { + mh = m_gethdr(M_DONTWAIT, MT_DATA); + if (mh == NULL) + goto update; + } else + mh = rxbuf->m_head; + + mh->m_pkthdr.len = mh->m_len = MHLEN; + mh->m_len = MHLEN; + mh->m_flags |= M_PKTHDR; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->htag, + rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); + if (error != 0) { + printf("Refresh mbufs: hdr dmamap load" + " failure - %d\n", error); + m_free(mh); + rxbuf->m_head = NULL; + goto update; + } + rxbuf->m_head = mh; + bus_dmamap_sync(rxr->htag, rxbuf->hmap, + BUS_DMASYNC_PREREAD); + rxr->rx_base[i].read.hdr_addr = + htole64(hseg[0].ds_addr); +no_split: + if (rxbuf->m_pack == NULL) { + mp = m_getjcl(M_DONTWAIT, MT_DATA, + M_PKTHDR, adapter->rx_mbuf_sz); + if (mp == NULL) + goto update; + } else + mp = rxbuf->m_pack; + + mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->ptag, + rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); + if (error != 0) { + printf("Refresh mbufs: payload dmamap load" + " failure - %d\n", error); + m_free(mp); + rxbuf->m_pack = NULL; + goto update; + } + rxbuf->m_pack = mp; + bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + BUS_DMASYNC_PREREAD); + rxr->rx_base[i].read.pkt_addr = + htole64(pseg[0].ds_addr); + refreshed = TRUE; /* I feel wefreshed :) */ + + i = j; /* our next is precalculated */ + rxr->next_to_refresh = i; + if (++j == adapter->num_rx_desc) + j = 0; + } +update: + if (refreshed) /* update tail */ + E1000_WRITE_REG(&adapter->hw, + E1000_RDT(rxr->me), rxr->next_to_refresh); + return; +} + + +/********************************************************************* + * + * Allocate memory for rx_buffer structures. Since we use one + * rx_buffer per received packet, the maximum number of rx_buffer's + * that we'll need is equal to the number of receive descriptors + * that we've allocated. 
+ * + **********************************************************************/ +static int +igb_allocate_receive_buffers(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + device_t dev = adapter->dev; + struct igb_rx_buf *rxbuf; + int i, bsize, error; + + bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc; + if (!(rxr->rx_buffers = + (struct igb_rx_buf *) malloc(bsize, + M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate rx_buffer memory\n"); + error = ENOMEM; + goto fail; + } + + if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MSIZE, /* maxsize */ + 1, /* nsegments */ + MSIZE, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &rxr->htag))) { + device_printf(dev, "Unable to create RX DMA tag\n"); + goto fail; + } + + if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MJUM9BYTES, /* maxsize */ + 1, /* nsegments */ + MJUM9BYTES, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &rxr->ptag))) { + device_printf(dev, "Unable to create RX payload DMA tag\n"); + goto fail; + } + + for (i = 0; i < adapter->num_rx_desc; i++) { + rxbuf = &rxr->rx_buffers[i]; + error = bus_dmamap_create(rxr->htag, + BUS_DMA_NOWAIT, &rxbuf->hmap); + if (error) { + device_printf(dev, + "Unable to create RX head DMA maps\n"); + goto fail; + } + error = bus_dmamap_create(rxr->ptag, + BUS_DMA_NOWAIT, &rxbuf->pmap); + if (error) { + device_printf(dev, + "Unable to create RX packet DMA maps\n"); + goto fail; + } + } + + return (0); + +fail: + /* Frees all, but can handle partial completion */ + igb_free_receive_structures(adapter); + return (error); +} + + +static void +igb_free_receive_ring(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + struct igb_rx_buf *rxbuf; + + + for (int i = 0; i < adapter->num_rx_desc; i++) { + rxbuf = &rxr->rx_buffers[i]; + if (rxbuf->m_head != NULL) { + bus_dmamap_sync(rxr->htag, rxbuf->hmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->htag, rxbuf->hmap); + rxbuf->m_head->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_head); + } + if (rxbuf->m_pack != NULL) { + bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->ptag, rxbuf->pmap); + rxbuf->m_pack->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_pack); + } + rxbuf->m_head = NULL; + rxbuf->m_pack = NULL; + } +} + + +/********************************************************************* + * + * Initialize a receive ring and its buffers. + * + **********************************************************************/ +static int +igb_setup_receive_ring(struct rx_ring *rxr) +{ + struct adapter *adapter; + struct ifnet *ifp; + device_t dev; + struct igb_rx_buf *rxbuf; + bus_dma_segment_t pseg[1], hseg[1]; + struct lro_ctrl *lro = &rxr->lro; + int rsize, nsegs, error = 0; + + adapter = rxr->adapter; + dev = adapter->dev; + ifp = adapter->ifp; + + /* Clear the ring contents */ + IGB_RX_LOCK(rxr); + rsize = roundup2(adapter->num_rx_desc * + sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN); + bzero((void *)rxr->rx_base, rsize); + + /* + ** Free current RX buffer structures and their mbufs + */ + igb_free_receive_ring(rxr); + + /* Configure for header split? 
*/
+ if (igb_header_split)
+ rxr->hdr_split = TRUE;
+
+ /* Now replenish the ring mbufs */
+ for (int j = 0; j < adapter->num_rx_desc; ++j) {
+ struct mbuf *mh, *mp;
+
+ rxbuf = &rxr->rx_buffers[j];
+ if (rxr->hdr_split == FALSE)
+ goto skip_head;
+
+ /* First the header */
+ rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
+ if (rxbuf->m_head == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ m_adj(rxbuf->m_head, ETHER_ALIGN);
+ mh = rxbuf->m_head;
+ mh->m_len = mh->m_pkthdr.len = MHLEN;
+ mh->m_flags |= M_PKTHDR;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->htag,
+ rxbuf->hmap, rxbuf->m_head, hseg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) /* Nothing elegant to do here */
+ goto fail;
+ bus_dmamap_sync(rxr->htag,
+ rxbuf->hmap, BUS_DMASYNC_PREREAD);
+ /* Update descriptor */
+ rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+
+skip_head:
+ /* Now the payload cluster */
+ rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
+ M_PKTHDR, adapter->rx_mbuf_sz);
+ if (rxbuf->m_pack == NULL) {
+ error = ENOBUFS;
+ goto fail;
+ }
+ mp = rxbuf->m_pack;
+ mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+ /* Get the memory mapping */
+ error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+ rxbuf->pmap, mp, pseg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0)
+ goto fail;
+ bus_dmamap_sync(rxr->ptag,
+ rxbuf->pmap, BUS_DMASYNC_PREREAD);
+ /* Update descriptor */
+ rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+ }
+
+ /* Setup our descriptor indices */
+ rxr->next_to_check = 0;
+ rxr->next_to_refresh = adapter->num_rx_desc - 1;
+ rxr->lro_enabled = FALSE;
+ rxr->rx_split_packets = 0;
+ rxr->rx_bytes = 0;
+
+ rxr->fmp = NULL;
+ rxr->lmp = NULL;
+ rxr->discard = FALSE;
+
+ bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ /*
+ ** Now set up the LRO interface, we
+ ** also only do head split when LRO
+ ** is enabled, since they are so often
+ ** undesirable in similar setups.
+ */
+ if (ifp->if_capenable & IFCAP_LRO) {
+ error = tcp_lro_init(lro);
+ if (error) {
+ device_printf(dev, "LRO Initialization failed!\n");
+ goto fail;
+ }
+ INIT_DEBUGOUT("RX LRO Initialized\n");
+ rxr->lro_enabled = TRUE;
+ lro->ifp = adapter->ifp;
+ }
+
+ IGB_RX_UNLOCK(rxr);
+ return (0);
+
+fail:
+ igb_free_receive_ring(rxr);
+ IGB_RX_UNLOCK(rxr);
+ return (error);
+}
+
+
+/*********************************************************************
+ *
+ * Initialize all receive rings.
+ *
+ **********************************************************************/
+static int
+igb_setup_receive_structures(struct adapter *adapter)
+{
+ struct rx_ring *rxr = adapter->rx_rings;
+ int i;
+
+ for (i = 0; i < adapter->num_queues; i++, rxr++)
+ if (igb_setup_receive_ring(rxr))
+ goto fail;
+
+ return (0);
+fail:
+ /*
+ * Free RX buffers allocated so far, we will only handle
+ * the rings that completed, the failing case will have
+ * cleaned up for itself. 'i' is the endpoint.
+ */
+ for (int j = 0; j < i; ++j) {
+ rxr = &adapter->rx_rings[j];
+ IGB_RX_LOCK(rxr);
+ igb_free_receive_ring(rxr);
+ IGB_RX_UNLOCK(rxr);
+ }
+
+ return (ENOBUFS);
+}
+
+/*********************************************************************
+ *
+ * Enable receive unit.
+ * + **********************************************************************/ +static void +igb_initialize_receive_units(struct adapter *adapter) +{ + struct rx_ring *rxr = adapter->rx_rings; + struct ifnet *ifp = adapter->ifp; + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rxcsum, psize, srrctl = 0; + + INIT_DEBUGOUT("igb_initialize_receive_unit: begin"); + + /* + * Make sure receives are disabled while setting + * up the descriptor ring + */ + rctl = E1000_READ_REG(hw, E1000_RCTL); + E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN); + + /* + ** Set up for header split + */ + if (igb_header_split) { + /* Use a standard mbuf for the header */ + srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* + ** Set up for jumbo frames + */ + if (ifp->if_mtu > ETHERMTU) { + rctl |= E1000_RCTL_LPE; + if (adapter->rx_mbuf_sz == MJUMPAGESIZE) { + srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX; + } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) { + srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX; + } + /* Set maximum packet len */ + psize = adapter->max_frame_size; + /* are we on a vlan? */ + if (adapter->ifp->if_vlantrunk != NULL) + psize += VLAN_TAG_SIZE; + E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize); + } else { + rctl &= ~E1000_RCTL_LPE; + srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT; + rctl |= E1000_RCTL_SZ_2048; + } + + /* Setup the Base and Length of the Rx Descriptor Rings */ + for (int i = 0; i < adapter->num_queues; i++, rxr++) { + u64 bus_addr = rxr->rxdma.dma_paddr; + u32 rxdctl; + + E1000_WRITE_REG(hw, E1000_RDLEN(i), + adapter->num_rx_desc * sizeof(struct e1000_rx_desc)); + E1000_WRITE_REG(hw, E1000_RDBAH(i), + (uint32_t)(bus_addr >> 32)); + E1000_WRITE_REG(hw, E1000_RDBAL(i), + (uint32_t)bus_addr); + E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl); + /* Enable this Queue */ + rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i)); + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= IGB_RX_PTHRESH; + rxdctl |= IGB_RX_HTHRESH << 8; + rxdctl |= IGB_RX_WTHRESH << 16; + E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl); + } + + /* + ** Setup for RX MultiQueue + */ + rxcsum = E1000_READ_REG(hw, E1000_RXCSUM); + if (adapter->num_queues >1) { + u32 random[10], mrqc, shift = 0; + union igb_reta { + u32 dword; + u8 bytes[4]; + } reta; + + arc4rand(&random, sizeof(random), 0); + if (adapter->hw.mac.type == e1000_82575) + shift = 6; + /* Warning FM follows */ + for (int i = 0; i < 128; i++) { + reta.bytes[i & 3] = + (i % adapter->num_queues) << shift; + if ((i & 3) == 3) + E1000_WRITE_REG(hw, + E1000_RETA(i >> 2), reta.dword); + } + /* Now fill in hash table */ + mrqc = E1000_MRQC_ENABLE_RSS_4Q; + for (int i = 0; i < 10; i++) + E1000_WRITE_REG_ARRAY(hw, + E1000_RSSRK(0), i, random[i]); + + mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP); + mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP); + mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP | + E1000_MRQC_RSS_FIELD_IPV6_UDP); + mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); + + E1000_WRITE_REG(hw, E1000_MRQC, mrqc); + + /* + ** NOTE: Receive Full-Packet Checksum Offload + ** is mutually exclusive with Multiqueue. However + ** this is not the same as TCP/IP checksums which + ** still work. 
+ */ + rxcsum |= E1000_RXCSUM_PCSD; +#if __FreeBSD_version >= 800000 + /* For SCTP Offload */ + if ((hw->mac.type == e1000_82576) + && (ifp->if_capenable & IFCAP_RXCSUM)) + rxcsum |= E1000_RXCSUM_CRCOFL; +#endif + } else { + /* Non RSS setup */ + if (ifp->if_capenable & IFCAP_RXCSUM) { + rxcsum |= E1000_RXCSUM_IPPCSE; +#if __FreeBSD_version >= 800000 + if (adapter->hw.mac.type == e1000_82576) + rxcsum |= E1000_RXCSUM_CRCOFL; +#endif + } else + rxcsum &= ~E1000_RXCSUM_TUOFL; + } + E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum); + + /* Setup the Receive Control Register */ + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + /* Strip CRC bytes. */ + rctl |= E1000_RCTL_SECRC; + /* Make sure VLAN Filters are off */ + rctl &= ~E1000_RCTL_VFE; + /* Don't store bad packets */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Receives */ + E1000_WRITE_REG(hw, E1000_RCTL, rctl); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers + * - needs to be after enable + */ + for (int i = 0; i < adapter->num_queues; i++) { + rxr = &adapter->rx_rings[i]; + E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check); + E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh); + } + return; +} + +/********************************************************************* + * + * Free receive rings. + * + **********************************************************************/ +static void +igb_free_receive_structures(struct adapter *adapter) +{ + struct rx_ring *rxr = adapter->rx_rings; + + for (int i = 0; i < adapter->num_queues; i++, rxr++) { + struct lro_ctrl *lro = &rxr->lro; + igb_free_receive_buffers(rxr); + tcp_lro_free(lro); + igb_dma_free(adapter, &rxr->rxdma); + } + + free(adapter->rx_rings, M_DEVBUF); +} + +/********************************************************************* + * + * Free receive ring data structures. + * + **********************************************************************/ +static void +igb_free_receive_buffers(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + struct igb_rx_buf *rxbuf; + int i; + + INIT_DEBUGOUT("free_receive_structures: begin"); + + /* Cleanup any existing buffers */ + if (rxr->rx_buffers != NULL) { + for (i = 0; i < adapter->num_rx_desc; i++) { + rxbuf = &rxr->rx_buffers[i]; + if (rxbuf->m_head != NULL) { + bus_dmamap_sync(rxr->htag, rxbuf->hmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->htag, rxbuf->hmap); + rxbuf->m_head->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_head); + } + if (rxbuf->m_pack != NULL) { + bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->ptag, rxbuf->pmap); + rxbuf->m_pack->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_pack); + } + rxbuf->m_head = NULL; + rxbuf->m_pack = NULL; + if (rxbuf->hmap != NULL) { + bus_dmamap_destroy(rxr->htag, rxbuf->hmap); + rxbuf->hmap = NULL; + } + if (rxbuf->pmap != NULL) { + bus_dmamap_destroy(rxr->ptag, rxbuf->pmap); + rxbuf->pmap = NULL; + } + } + if (rxr->rx_buffers != NULL) { + free(rxr->rx_buffers, M_DEVBUF); + rxr->rx_buffers = NULL; + } + } + + if (rxr->htag != NULL) { + bus_dma_tag_destroy(rxr->htag); + rxr->htag = NULL; + } + if (rxr->ptag != NULL) { + bus_dma_tag_destroy(rxr->ptag); + rxr->ptag = NULL; + } +} + +static __inline void +igb_rx_discard(struct rx_ring *rxr, int i) +{ + struct igb_rx_buf *rbuf; + + rbuf = &rxr->rx_buffers[i]; + + /* Partially received? 
Free the chain */ + if (rxr->fmp != NULL) { + rxr->fmp->m_flags |= M_PKTHDR; + m_freem(rxr->fmp); + rxr->fmp = NULL; + rxr->lmp = NULL; + } + + /* + ** With advanced descriptors the writeback + ** clobbers the buffer addrs, so its easier + ** to just free the existing mbufs and take + ** the normal refresh path to get new buffers + ** and mapping. + */ + if (rbuf->m_head) { + m_free(rbuf->m_head); + rbuf->m_head = NULL; + } + + if (rbuf->m_pack) { + m_free(rbuf->m_pack); + rbuf->m_pack = NULL; + } + + return; +} + +static __inline void +igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype) +{ + + /* + * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet + * should be computed by hardware. Also it should not have VLAN tag in + * ethernet header. + */ + if (rxr->lro_enabled && + (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && + (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 && + (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) == + (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) && + (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == + (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { + /* + * Send to the stack if: + ** - LRO not enabled, or + ** - no LRO resources, or + ** - lro enqueue fails + */ + if (rxr->lro.lro_cnt != 0) + if (tcp_lro_rx(&rxr->lro, m, 0) == 0) + return; + } + IGB_RX_UNLOCK(rxr); + (*ifp->if_input)(ifp, m); + IGB_RX_LOCK(rxr); +} + +/********************************************************************* + * + * This routine executes in interrupt context. It replenishes + * the mbufs in the descriptor and sends data which has been + * dma'ed into host memory to upper layer. + * + * We loop at most count times if count is > 0, or until done if + * count < 0. + * + * Return TRUE if more to clean, FALSE otherwise + *********************************************************************/ +static bool +igb_rxeof(struct igb_queue *que, int count, int *done) +{ + struct adapter *adapter = que->adapter; + struct rx_ring *rxr = que->rxr; + struct ifnet *ifp = adapter->ifp; + struct lro_ctrl *lro = &rxr->lro; + struct lro_entry *queued; + int i, processed = 0, rxdone = 0; + u32 ptype, staterr = 0; + union e1000_adv_rx_desc *cur; + + IGB_RX_LOCK(rxr); + /* Sync the ring. 
*/ + bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + /* Main clean loop */ + for (i = rxr->next_to_check; count != 0;) { + struct mbuf *sendmp, *mh, *mp; + struct igb_rx_buf *rxbuf; + u16 hlen, plen, hdr, vtag; + bool eop = FALSE; + + cur = &rxr->rx_base[i]; + staterr = le32toh(cur->wb.upper.status_error); + if ((staterr & E1000_RXD_STAT_DD) == 0) + break; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + break; + count--; + sendmp = mh = mp = NULL; + cur->wb.upper.status_error = 0; + rxbuf = &rxr->rx_buffers[i]; + plen = le16toh(cur->wb.upper.length); + ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK; + if ((adapter->hw.mac.type == e1000_i350) && + (staterr & E1000_RXDEXT_STATERR_LB)) + vtag = be16toh(cur->wb.upper.vlan); + else + vtag = le16toh(cur->wb.upper.vlan); + hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info); + eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP); + + /* Make sure all segments of a bad packet are discarded */ + if (((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0) || + (rxr->discard)) { + ifp->if_ierrors++; + ++rxr->rx_discarded; + if (!eop) /* Catch subsequent segs */ + rxr->discard = TRUE; + else + rxr->discard = FALSE; + igb_rx_discard(rxr, i); + goto next_desc; + } + + /* + ** The way the hardware is configured to + ** split, it will ONLY use the header buffer + ** when header split is enabled, otherwise we + ** get normal behavior, ie, both header and + ** payload are DMA'd into the payload buffer. + ** + ** The fmp test is to catch the case where a + ** packet spans multiple descriptors, in that + ** case only the first header is valid. + */ + if (rxr->hdr_split && rxr->fmp == NULL) { + hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >> + E1000_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > IGB_HDR_BUF) + hlen = IGB_HDR_BUF; + mh = rxr->rx_buffers[i].m_head; + mh->m_len = hlen; + /* clear buf pointer for refresh */ + rxbuf->m_head = NULL; + /* + ** Get the payload length, this + ** could be zero if its a small + ** packet. + */ + if (plen > 0) { + mp = rxr->rx_buffers[i].m_pack; + mp->m_len = plen; + mh->m_next = mp; + /* clear buf pointer */ + rxbuf->m_pack = NULL; + rxr->rx_split_packets++; + } + } else { + /* + ** Either no header split, or a + ** secondary piece of a fragmented + ** split packet. 
+ */ + mh = rxr->rx_buffers[i].m_pack; + mh->m_len = plen; + /* clear buf info for refresh */ + rxbuf->m_pack = NULL; + } + + ++processed; /* So we know when to refresh */ + + /* Initial frame - setup */ + if (rxr->fmp == NULL) { + mh->m_pkthdr.len = mh->m_len; + /* Save the head of the chain */ + rxr->fmp = mh; + rxr->lmp = mh; + if (mp != NULL) { + /* Add payload if split */ + mh->m_pkthdr.len += mp->m_len; + rxr->lmp = mh->m_next; + } + } else { + /* Chain mbuf's together */ + rxr->lmp->m_next = mh; + rxr->lmp = rxr->lmp->m_next; + rxr->fmp->m_pkthdr.len += mh->m_len; + } + + if (eop) { + rxr->fmp->m_pkthdr.rcvif = ifp; + ifp->if_ipackets++; + rxr->rx_packets++; + /* capture data for AIM */ + rxr->packets++; + rxr->bytes += rxr->fmp->m_pkthdr.len; + rxr->rx_bytes += rxr->fmp->m_pkthdr.len; + + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) + igb_rx_checksum(staterr, rxr->fmp, ptype); + + if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && + (staterr & E1000_RXD_STAT_VP) != 0) { + rxr->fmp->m_pkthdr.ether_vtag = vtag; + rxr->fmp->m_flags |= M_VLANTAG; + } +#if __FreeBSD_version >= 800000 + rxr->fmp->m_pkthdr.flowid = que->msix; + rxr->fmp->m_flags |= M_FLOWID; +#endif + sendmp = rxr->fmp; + /* Make sure to set M_PKTHDR. */ + sendmp->m_flags |= M_PKTHDR; + rxr->fmp = NULL; + rxr->lmp = NULL; + } + +next_desc: + bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* Advance our pointers to the next descriptor. */ + if (++i == adapter->num_rx_desc) + i = 0; + /* + ** Send to the stack or LRO + */ + if (sendmp != NULL) { + rxr->next_to_check = i; + igb_rx_input(rxr, ifp, sendmp, ptype); + i = rxr->next_to_check; + rxdone++; + } + + /* Every 8 descriptors we go to refresh mbufs */ + if (processed == 8) { + igb_refresh_mbufs(rxr, i); + processed = 0; + } + } + + /* Catch any remainders */ + if (igb_rx_unrefreshed(rxr)) + igb_refresh_mbufs(rxr, i); + + rxr->next_to_check = i; + + /* + * Flush any outstanding LRO work + */ + while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { + SLIST_REMOVE_HEAD(&lro->lro_active, next); + tcp_lro_flush(lro, queued); + } + + if (done != NULL) + *done = rxdone; + + IGB_RX_UNLOCK(rxr); + return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE); +} + +/********************************************************************* + * + * Verify that the hardware indicated that the checksum is valid. + * Inform the stack about the status of checksum so that stack + * doesn't spend time verifying the checksum. + * + *********************************************************************/ +static void +igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype) +{ + u16 status = (u16)staterr; + u8 errors = (u8) (staterr >> 24); + int sctp; + + /* Ignore Checksum bit is set */ + if (status & E1000_RXD_STAT_IXSM) { + mp->m_pkthdr.csum_flags = 0; + return; + } + + if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 && + (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0) + sctp = 1; + else + sctp = 0; + if (status & E1000_RXD_STAT_IPCS) { + /* Did it pass? */ + if (!(errors & E1000_RXD_ERR_IPE)) { + /* IP Checksum Good */ + mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; + mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; + } else + mp->m_pkthdr.csum_flags = 0; + } + + if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) { + u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); +#if __FreeBSD_version >= 800000 + if (sctp) /* reassign */ + type = CSUM_SCTP_VALID; +#endif + /* Did it pass? 
*/ + if (!(errors & E1000_RXD_ERR_TCPE)) { + mp->m_pkthdr.csum_flags |= type; + if (sctp == 0) + mp->m_pkthdr.csum_data = htons(0xffff); + } + } + return; +} + +/* + * This routine is run via an vlan + * config EVENT + */ +static void +igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) +{ + struct adapter *adapter = ifp->if_softc; + u32 index, bit; + + if (ifp->if_softc != arg) /* Not our event */ + return; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + IGB_CORE_LOCK(adapter); + index = (vtag >> 5) & 0x7F; + bit = vtag & 0x1F; + adapter->shadow_vfta[index] |= (1 << bit); + ++adapter->num_vlans; + /* Change hw filter setting */ + if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) + igb_setup_vlan_hw_support(adapter); + IGB_CORE_UNLOCK(adapter); +} + +/* + * This routine is run via an vlan + * unconfig EVENT + */ +static void +igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) +{ + struct adapter *adapter = ifp->if_softc; + u32 index, bit; + + if (ifp->if_softc != arg) + return; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + IGB_CORE_LOCK(adapter); + index = (vtag >> 5) & 0x7F; + bit = vtag & 0x1F; + adapter->shadow_vfta[index] &= ~(1 << bit); + --adapter->num_vlans; + /* Change hw filter setting */ + if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) + igb_setup_vlan_hw_support(adapter); + IGB_CORE_UNLOCK(adapter); +} + +static void +igb_setup_vlan_hw_support(struct adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct ifnet *ifp = adapter->ifp; + u32 reg; + + if (adapter->vf_ifp) { + e1000_rlpml_set_vf(hw, + adapter->max_frame_size + VLAN_TAG_SIZE); + return; + } + + reg = E1000_READ_REG(hw, E1000_CTRL); + reg |= E1000_CTRL_VME; + E1000_WRITE_REG(hw, E1000_CTRL, reg); + + /* Enable the Filter Table */ + if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { + reg = E1000_READ_REG(hw, E1000_RCTL); + reg &= ~E1000_RCTL_CFIEN; + reg |= E1000_RCTL_VFE; + E1000_WRITE_REG(hw, E1000_RCTL, reg); + } + + /* Update the frame size */ + E1000_WRITE_REG(&adapter->hw, E1000_RLPML, + adapter->max_frame_size + VLAN_TAG_SIZE); + + /* Don't bother with table if no vlans */ + if ((adapter->num_vlans == 0) || + ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) + return; + /* + ** A soft reset zero's out the VFTA, so + ** we need to repopulate it now. + */ + for (int i = 0; i < IGB_VFTA_SIZE; i++) + if (adapter->shadow_vfta[i] != 0) { + if (adapter->vf_ifp) + e1000_vfta_set_vf(hw, + adapter->shadow_vfta[i], TRUE); + else + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, + i, adapter->shadow_vfta[i]); + } +} + +static void +igb_enable_intr(struct adapter *adapter) +{ + /* With RSS set up what to auto clear */ + if (adapter->msix_mem) { + u32 mask = (adapter->que_mask | adapter->link_mask); + E1000_WRITE_REG(&adapter->hw, E1000_EIAC, mask); + E1000_WRITE_REG(&adapter->hw, E1000_EIAM, mask); + E1000_WRITE_REG(&adapter->hw, E1000_EIMS, mask); + E1000_WRITE_REG(&adapter->hw, E1000_IMS, + E1000_IMS_LSC); + } else { + E1000_WRITE_REG(&adapter->hw, E1000_IMS, + IMS_ENABLE_MASK); + } + E1000_WRITE_FLUSH(&adapter->hw); + + return; +} + +static void +igb_disable_intr(struct adapter *adapter) +{ + if (adapter->msix_mem) { + E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0); + E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0); + } + E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0); + E1000_WRITE_FLUSH(&adapter->hw); + return; +} + +/* + * Bit of a misnomer, what this really means is + * to enable OS management of the system... 
aka + * to disable special hardware management features + */ +static void +igb_init_manageability(struct adapter *adapter) +{ + if (adapter->has_manage) { + int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); + int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + /* enable receiving management packets to the host */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h |= 1 << 5; /* Mng Port 623 */ + manc2h |= 1 << 6; /* Mng Port 664 */ + E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); + E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); + } +} + +/* + * Give control back to hardware management + * controller if there is one. + */ +static void +igb_release_manageability(struct adapter *adapter) +{ + if (adapter->has_manage) { + int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + manc &= ~E1000_MANC_EN_MNG2HOST; + + E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); + } +} + +/* + * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. + * + */ +static void +igb_get_hw_control(struct adapter *adapter) +{ + u32 ctrl_ext; + + if (adapter->vf_ifp) + return; + + /* Let firmware know the driver has taken over */ + ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, + ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); +} + +/* + * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. + * + */ +static void +igb_release_hw_control(struct adapter *adapter) +{ + u32 ctrl_ext; + + if (adapter->vf_ifp) + return; + + /* Let firmware taken over control of h/w */ + ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); + E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, + ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); +} + +static int +igb_is_valid_ether_addr(uint8_t *addr) +{ + char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; + + if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { + return (FALSE); + } + + return (TRUE); +} + + +/* + * Enable PCI Wake On Lan capability + */ +static void +igb_enable_wakeup(device_t dev) +{ + u16 cap, status; + u8 id; + + /* First find the capabilities pointer*/ + cap = pci_read_config(dev, PCIR_CAP_PTR, 2); + /* Read the PM Capabilities */ + id = pci_read_config(dev, cap, 1); + if (id != PCIY_PMG) /* Something wrong */ + return; + /* OK, we have the power capabilities, so + now get the status register */ + cap += PCIR_POWER_STATUS; + status = pci_read_config(dev, cap, 2); + status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; + pci_write_config(dev, cap, status, 2); + return; +} + +static void +igb_led_func(void *arg, int onoff) +{ + struct adapter *adapter = arg; + + IGB_CORE_LOCK(adapter); + if (onoff) { + e1000_setup_led(&adapter->hw); + e1000_led_on(&adapter->hw); + } else { + e1000_led_off(&adapter->hw); + e1000_cleanup_led(&adapter->hw); + } + IGB_CORE_UNLOCK(adapter); +} + +/********************************************************************** + * + * Update the board statistics counters. 
+ * + **********************************************************************/ +static void +igb_update_stats_counters(struct adapter *adapter) +{ + struct ifnet *ifp; + struct e1000_hw *hw = &adapter->hw; + struct e1000_hw_stats *stats; + + /* + ** The virtual function adapter has only a + ** small controlled set of stats, do only + ** those and return. + */ + if (adapter->vf_ifp) { + igb_update_vf_stats_counters(adapter); + return; + } + + stats = (struct e1000_hw_stats *)adapter->stats; + + if(adapter->hw.phy.media_type == e1000_media_type_copper || + (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { + stats->symerrs += + E1000_READ_REG(hw,E1000_SYMERRS); + stats->sec += E1000_READ_REG(hw, E1000_SEC); + } + + stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); + stats->mpc += E1000_READ_REG(hw, E1000_MPC); + stats->scc += E1000_READ_REG(hw, E1000_SCC); + stats->ecol += E1000_READ_REG(hw, E1000_ECOL); + + stats->mcc += E1000_READ_REG(hw, E1000_MCC); + stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); + stats->colc += E1000_READ_REG(hw, E1000_COLC); + stats->dc += E1000_READ_REG(hw, E1000_DC); + stats->rlec += E1000_READ_REG(hw, E1000_RLEC); + stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); + stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); + /* + ** For watchdog management we need to know if we have been + ** paused during the last interval, so capture that here. + */ + adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); + stats->xoffrxc += adapter->pause_frames; + stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); + stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); + stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); + stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); + stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); + stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); + stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); + stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); + stats->gprc += E1000_READ_REG(hw, E1000_GPRC); + stats->bprc += E1000_READ_REG(hw, E1000_BPRC); + stats->mprc += E1000_READ_REG(hw, E1000_MPRC); + stats->gptc += E1000_READ_REG(hw, E1000_GPTC); + + /* For the 64-bit byte counters the low dword must be read first. 
*/ + /* Both registers clear on the read of the high dword */ + + stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + + ((u64)E1000_READ_REG(hw, E1000_GORCH) << 32); + stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + + ((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32); + + stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); + stats->ruc += E1000_READ_REG(hw, E1000_RUC); + stats->rfc += E1000_READ_REG(hw, E1000_RFC); + stats->roc += E1000_READ_REG(hw, E1000_ROC); + stats->rjc += E1000_READ_REG(hw, E1000_RJC); + + stats->tor += E1000_READ_REG(hw, E1000_TORH); + stats->tot += E1000_READ_REG(hw, E1000_TOTH); + + stats->tpr += E1000_READ_REG(hw, E1000_TPR); + stats->tpt += E1000_READ_REG(hw, E1000_TPT); + stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); + stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); + stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); + stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); + stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); + stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); + stats->mptc += E1000_READ_REG(hw, E1000_MPTC); + stats->bptc += E1000_READ_REG(hw, E1000_BPTC); + + /* Interrupt Counts */ + + stats->iac += E1000_READ_REG(hw, E1000_IAC); + stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); + stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); + stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); + stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); + stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); + stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); + stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); + stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); + + /* Host to Card Statistics */ + + stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); + stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); + stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); + stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); + stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); + stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); + stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); + stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + + ((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32)); + stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + + ((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); + stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); + stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); + stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); + + stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); + stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); + stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); + stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); + stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); + stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); + + ifp = adapter->ifp; + ifp->if_collisions = stats->colc; + + /* Rx Errors */ + ifp->if_ierrors = adapter->dropped_pkts + stats->rxerrc + + stats->crcerrs + stats->algnerrc + + stats->ruc + stats->roc + stats->mpc + stats->cexterr; + + /* Tx Errors */ + ifp->if_oerrors = stats->ecol + + stats->latecol + adapter->watchdog_events; + + /* Driver specific counters */ + adapter->device_control = E1000_READ_REG(hw, E1000_CTRL); + adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL); + adapter->int_mask = E1000_READ_REG(hw, E1000_IMS); + adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS); + adapter->packet_buf_alloc_tx = + ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); + adapter->packet_buf_alloc_rx = + (E1000_READ_REG(hw, E1000_PBA) & 0xffff); +} + + 
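+/*
+ * Note on the two VF statistics helpers below: igb_vf_init_stats()
+ * snapshots the current 32-bit hardware counters into the last_* fields,
+ * and igb_update_vf_stats_counters() folds each new reading into a
+ * 64-bit running total through UPDATE_VF_REG() (defined earlier in this
+ * file, not shown here). The accumulation pattern assumed here, sketched
+ * with hypothetical names cur/last/total, adds a carry whenever the
+ * 32-bit hardware counter wraps:
+ *
+ *	cur = E1000_READ_REG(hw, reg);
+ *	if (cur < last)
+ *		total += 1ULL << 32;
+ *	total = (total & 0xFFFFFFFF00000000ULL) | cur;
+ *	last = cur;
+ */
+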
+/********************************************************************** + * + * Initialize the VF board statistics counters. + * + **********************************************************************/ +static void +igb_vf_init_stats(struct adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_vf_stats *stats; + + stats = (struct e1000_vf_stats *)adapter->stats; + if (stats == NULL) + return; + stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC); + stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC); + stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC); + stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC); + stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC); +} + +/********************************************************************** + * + * Update the VF board statistics counters. + * + **********************************************************************/ +static void +igb_update_vf_stats_counters(struct adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_vf_stats *stats; + + if (adapter->link_speed == 0) + return; + + stats = (struct e1000_vf_stats *)adapter->stats; + + UPDATE_VF_REG(E1000_VFGPRC, + stats->last_gprc, stats->gprc); + UPDATE_VF_REG(E1000_VFGORC, + stats->last_gorc, stats->gorc); + UPDATE_VF_REG(E1000_VFGPTC, + stats->last_gptc, stats->gptc); + UPDATE_VF_REG(E1000_VFGOTC, + stats->last_gotc, stats->gotc); + UPDATE_VF_REG(E1000_VFMPRC, + stats->last_mprc, stats->mprc); +} + +/* Export a single 32-bit register via a read-only sysctl. */ +static int +igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter; + u_int val; + + adapter = oidp->oid_arg1; + val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2); + return (sysctl_handle_int(oidp, &val, 0, req)); +} + +/* +** Tuneable interrupt rate handler +*/ +static int +igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) +{ + struct igb_queue *que = ((struct igb_queue *)oidp->oid_arg1); + int error; + u32 reg, usec, rate; + + reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix)); + usec = ((reg & 0x7FFC) >> 2); + if (usec > 0) + rate = 1000000 / usec; + else + rate = 0; + error = sysctl_handle_int(oidp, &rate, 0, req); + if (error || !req->newptr) + return error; + return 0; +} + +/* + * Add sysctl variables, one per statistic, to the system. 
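 *
 * The per-queue "interrupt_rate" node registered below is backed by
 * igb_sysctl_interrupt_rate_handler() above, which decodes the EITR
 * interval field as usec = (reg & 0x7FFC) >> 2 and reports
 * 1000000 / usec interrupts per second.  For example, the default ITR
 * of ((1000000 / IGB_INTS_PER_SEC) << 2) = 500 decodes back to 125 usec,
 * i.e. the advertised 8000 interrupts per second.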
+ */ +static void +igb_add_hw_stats(struct adapter *adapter) +{ + device_t dev = adapter->dev; + + struct tx_ring *txr = adapter->tx_rings; + struct rx_ring *rxr = adapter->rx_rings; + + struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); + struct sysctl_oid *tree = device_get_sysctl_tree(dev); + struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); + struct e1000_hw_stats *stats = adapter->stats; + + struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node; + struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list; + +#define QUEUE_NAME_LEN 32 + char namebuf[QUEUE_NAME_LEN]; + + /* Driver Statistics */ + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "link_irq", + CTLFLAG_RD, &adapter->link_irq, 0, + "Link MSIX IRQ Handled"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", + CTLFLAG_RD, &adapter->dropped_pkts, + "Driver dropped packets"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", + CTLFLAG_RD, &adapter->no_tx_dma_setup, + "Driver tx dma failure in xmit"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns", + CTLFLAG_RD, &adapter->rx_overruns, + "RX overruns"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts", + CTLFLAG_RD, &adapter->watchdog_events, + "Watchdog timeouts"); + + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control", + CTLFLAG_RD, &adapter->device_control, + "Device Control Register"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control", + CTLFLAG_RD, &adapter->rx_control, + "Receiver Control Register"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask", + CTLFLAG_RD, &adapter->int_mask, + "Interrupt Mask"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask", + CTLFLAG_RD, &adapter->eint_mask, + "Extended Interrupt Mask"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc", + CTLFLAG_RD, &adapter->packet_buf_alloc_tx, + "Transmit Buffer Packet Allocation"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc", + CTLFLAG_RD, &adapter->packet_buf_alloc_rx, + "Receive Buffer Packet Allocation"); + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water", + CTLFLAG_RD, &adapter->hw.fc.high_water, 0, + "Flow Control High Watermark"); + SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", + CTLFLAG_RD, &adapter->hw.fc.low_water, 0, + "Flow Control Low Watermark"); + + for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) { + struct lro_ctrl *lro = &rxr->lro; + + snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); + queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, + CTLFLAG_RD, NULL, "Queue Name"); + queue_list = SYSCTL_CHILDREN(queue_node); + + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", + CTLFLAG_RD, &adapter->queues[i], + sizeof(&adapter->queues[i]), + igb_sysctl_interrupt_rate_handler, + "IU", "Interrupt Rate"); + + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", + CTLFLAG_RD, adapter, E1000_TDH(txr->me), + igb_sysctl_reg_handler, "IU", + "Transmit Descriptor Head"); + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", + CTLFLAG_RD, adapter, E1000_TDT(txr->me), + igb_sysctl_reg_handler, "IU", + "Transmit Descriptor Tail"); + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", + CTLFLAG_RD, &txr->no_desc_avail, + "Queue No Descriptor Available"); + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets", + CTLFLAG_RD, &txr->tx_packets, + "Queue Packets Transmitted"); + + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", + CTLFLAG_RD, adapter, E1000_RDH(rxr->me), + igb_sysctl_reg_handler, "IU", + "Receive Descriptor Head"); + 
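		/*
		 * The descriptor head/tail nodes here pass the register
		 * offset to igb_sysctl_reg_handler() as arg2, so every
		 * sysctl read returns the live hardware value rather than
		 * a cached copy.  A minimal userland sketch (the OID path
		 * assumes driver unit 0 and queue 0):
		 *
		 *	u_int head;
		 *	size_t len = sizeof(head);
		 *	sysctlbyname("dev.igb.0.queue0.rxd_head", &head, &len,
		 *	    NULL, 0);
		 */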
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", + CTLFLAG_RD, adapter, E1000_RDT(rxr->me), + igb_sysctl_reg_handler, "IU", + "Receive Descriptor Tail"); + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", + CTLFLAG_RD, &rxr->rx_packets, + "Queue Packets Received"); + SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", + CTLFLAG_RD, &rxr->rx_bytes, + "Queue Bytes Received"); + SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued", + CTLFLAG_RD, &lro->lro_queued, 0, + "LRO Queued"); + SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed", + CTLFLAG_RD, &lro->lro_flushed, 0, + "LRO Flushed"); + } + + /* MAC stats get their own sub node */ + + stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", + CTLFLAG_RD, NULL, "MAC Statistics"); + stat_list = SYSCTL_CHILDREN(stat_node); + + /* + ** VF adapter has a very limited set of stats + ** since its not managing the metal, so to speak. + */ + if (adapter->vf_ifp) { + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", + CTLFLAG_RD, &stats->gprc, + "Good Packets Received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", + CTLFLAG_RD, &stats->gptc, + "Good Packets Transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", + CTLFLAG_RD, &stats->gorc, + "Good Octets Received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", + CTLFLAG_RD, &stats->gotc, + "Good Octets Transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", + CTLFLAG_RD, &stats->mprc, + "Multicast Packets Received"); + return; + } + + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll", + CTLFLAG_RD, &stats->ecol, + "Excessive collisions"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll", + CTLFLAG_RD, &stats->scc, + "Single collisions"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll", + CTLFLAG_RD, &stats->mcc, + "Multiple collisions"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll", + CTLFLAG_RD, &stats->latecol, + "Late collisions"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count", + CTLFLAG_RD, &stats->colc, + "Collision Count"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors", + CTLFLAG_RD, &stats->symerrs, + "Symbol Errors"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors", + CTLFLAG_RD, &stats->sec, + "Sequence Errors"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count", + CTLFLAG_RD, &stats->dc, + "Defer Count"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets", + CTLFLAG_RD, &stats->mpc, + "Missed Packets"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff", + CTLFLAG_RD, &stats->rnbc, + "Receive No Buffers"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize", + CTLFLAG_RD, &stats->ruc, + "Receive Undersize"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", + CTLFLAG_RD, &stats->rfc, + "Fragmented Packets Received "); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize", + CTLFLAG_RD, &stats->roc, + "Oversized Packets Received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber", + CTLFLAG_RD, &stats->rjc, + "Recevied Jabber"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs", + CTLFLAG_RD, &stats->rxerrc, + "Receive Errors"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs", + CTLFLAG_RD, &stats->crcerrs, + "CRC errors"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs", + CTLFLAG_RD, &stats->algnerrc, + "Alignment Errors"); + /* On 82575 these are collision counts */ + SYSCTL_ADD_QUAD(ctx, 
stat_list, OID_AUTO, "coll_ext_errs", + CTLFLAG_RD, &stats->cexterr, + "Collision/Carrier extension errors"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd", + CTLFLAG_RD, &stats->xonrxc, + "XON Received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd", + CTLFLAG_RD, &stats->xontxc, + "XON Transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd", + CTLFLAG_RD, &stats->xoffrxc, + "XOFF Received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd", + CTLFLAG_RD, &stats->xofftxc, + "XOFF Transmitted"); + /* Packet Reception Stats */ + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd", + CTLFLAG_RD, &stats->tpr, + "Total Packets Received "); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd", + CTLFLAG_RD, &stats->gprc, + "Good Packets Received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd", + CTLFLAG_RD, &stats->bprc, + "Broadcast Packets Received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd", + CTLFLAG_RD, &stats->mprc, + "Multicast Packets Received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", + CTLFLAG_RD, &stats->prc64, + "64 byte frames received "); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", + CTLFLAG_RD, &stats->prc127, + "65-127 byte frames received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", + CTLFLAG_RD, &stats->prc255, + "128-255 byte frames received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", + CTLFLAG_RD, &stats->prc511, + "256-511 byte frames received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", + CTLFLAG_RD, &stats->prc1023, + "512-1023 byte frames received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", + CTLFLAG_RD, &stats->prc1522, + "1023-1522 byte frames received"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", + CTLFLAG_RD, &stats->gorc, + "Good Octets Received"); + + /* Packet Transmission Stats */ + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", + CTLFLAG_RD, &stats->gotc, + "Good Octets Transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", + CTLFLAG_RD, &stats->tpt, + "Total Packets Transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", + CTLFLAG_RD, &stats->gptc, + "Good Packets Transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", + CTLFLAG_RD, &stats->bptc, + "Broadcast Packets Transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", + CTLFLAG_RD, &stats->mptc, + "Multicast Packets Transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", + CTLFLAG_RD, &stats->ptc64, + "64 byte frames transmitted "); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", + CTLFLAG_RD, &stats->ptc127, + "65-127 byte frames transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", + CTLFLAG_RD, &stats->ptc255, + "128-255 byte frames transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", + CTLFLAG_RD, &stats->ptc511, + "256-511 byte frames transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", + CTLFLAG_RD, &stats->ptc1023, + "512-1023 byte frames transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", + CTLFLAG_RD, &stats->ptc1522, + "1024-1522 byte frames transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd", + CTLFLAG_RD, &stats->tsctc, + "TSO Contexts Transmitted"); + SYSCTL_ADD_QUAD(ctx, stat_list, 
OID_AUTO, "tso_ctx_fail", + CTLFLAG_RD, &stats->tsctfc, + "TSO Contexts Failed"); + + + /* Interrupt Stats */ + + int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", + CTLFLAG_RD, NULL, "Interrupt Statistics"); + int_list = SYSCTL_CHILDREN(int_node); + + SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts", + CTLFLAG_RD, &stats->iac, + "Interrupt Assertion Count"); + + SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer", + CTLFLAG_RD, &stats->icrxptc, + "Interrupt Cause Rx Pkt Timer Expire Count"); + + SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer", + CTLFLAG_RD, &stats->icrxatc, + "Interrupt Cause Rx Abs Timer Expire Count"); + + SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer", + CTLFLAG_RD, &stats->ictxptc, + "Interrupt Cause Tx Pkt Timer Expire Count"); + + SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer", + CTLFLAG_RD, &stats->ictxatc, + "Interrupt Cause Tx Abs Timer Expire Count"); + + SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty", + CTLFLAG_RD, &stats->ictxqec, + "Interrupt Cause Tx Queue Empty Count"); + + SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh", + CTLFLAG_RD, &stats->ictxqmtc, + "Interrupt Cause Tx Queue Min Thresh Count"); + + SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh", + CTLFLAG_RD, &stats->icrxdmtc, + "Interrupt Cause Rx Desc Min Thresh Count"); + + SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun", + CTLFLAG_RD, &stats->icrxoc, + "Interrupt Cause Receiver Overrun Count"); + + /* Host to Card Stats */ + + host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host", + CTLFLAG_RD, NULL, + "Host to Card Statistics"); + + host_list = SYSCTL_CHILDREN(host_node); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt", + CTLFLAG_RD, &stats->cbtmpc, + "Circuit Breaker Tx Packet Count"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard", + CTLFLAG_RD, &stats->htdpmc, + "Host Transmit Discarded Packets"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt", + CTLFLAG_RD, &stats->rpthc, + "Rx Packets To Host"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts", + CTLFLAG_RD, &stats->cbrmpc, + "Circuit Breaker Rx Packet Count"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop", + CTLFLAG_RD, &stats->cbrdpc, + "Circuit Breaker Rx Dropped Count"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt", + CTLFLAG_RD, &stats->hgptc, + "Host Good Packets Tx Count"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop", + CTLFLAG_RD, &stats->htcbdpc, + "Host Tx Circuit Breaker Dropped Count"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes", + CTLFLAG_RD, &stats->hgorc, + "Host Good Octets Received Count"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes", + CTLFLAG_RD, &stats->hgotc, + "Host Good Octets Transmit Count"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors", + CTLFLAG_RD, &stats->lenerrs, + "Length Errors"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt", + CTLFLAG_RD, &stats->scvpc, + "SerDes/SGMII Code Violation Pkt Count"); + + SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed", + CTLFLAG_RD, &stats->hrmpc, + "Header Redirection Missed Packet Count"); +} + + +/********************************************************************** + * + * This routine provides a way to dump out the adapter eeprom, + * often a useful debug/service tool. This only dumps the first + * 32 words, stuff that matters is in that extent. 
+ * + **********************************************************************/ +static int +igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) +{ + struct adapter *adapter; + int error; + int result; + + result = -1; + error = sysctl_handle_int(oidp, &result, 0, req); + + if (error || !req->newptr) + return (error); + + /* + * This value will cause a hex dump of the + * first 32 16-bit words of the EEPROM to + * the screen. + */ + if (result == 1) { + adapter = (struct adapter *)arg1; + igb_print_nvm_info(adapter); + } + + return (error); +} + +static void +igb_print_nvm_info(struct adapter *adapter) +{ + u16 eeprom_data; + int i, j, row = 0; + + /* Its a bit crude, but it gets the job done */ + printf("\nInterface EEPROM Dump:\n"); + printf("Offset\n0x0000 "); + for (i = 0, j = 0; i < 32; i++, j++) { + if (j == 8) { /* Make the offset block */ + j = 0; ++row; + printf("\n0x00%x0 ",row); + } + e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); + printf("%04x ", eeprom_data); + } + printf("\n"); +} + +static void +igb_set_sysctl_value(struct adapter *adapter, const char *name, + const char *description, int *limit, int value) +{ + *limit = value; + SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), + OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); +} + +/* +** Set flow control using sysctl: +** Flow control values: +** 0 - off +** 1 - rx pause +** 2 - tx pause +** 3 - full +*/ +static int +igb_set_flowcntl(SYSCTL_HANDLER_ARGS) +{ + int error; + struct adapter *adapter; + + error = sysctl_handle_int(oidp, &igb_fc_setting, 0, req); + + if (error) + return (error); + + adapter = (struct adapter *) arg1; + switch (igb_fc_setting) { + case e1000_fc_rx_pause: + case e1000_fc_tx_pause: + case e1000_fc_full: + adapter->hw.fc.requested_mode = igb_fc_setting; + break; + case e1000_fc_none: + default: + adapter->hw.fc.requested_mode = e1000_fc_none; + } + + adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode; + e1000_force_mac_fc(&adapter->hw); + return error; +} diff --git a/lib/librte_pmd_igb/igb/if_igb.h b/lib/librte_pmd_igb/igb/if_igb.h new file mode 100644 index 0000000000..9a0bb474c7 --- /dev/null +++ b/lib/librte_pmd_igb/igb/if_igb.h @@ -0,0 +1,541 @@ +/****************************************************************************** + + Copyright (c) 2001-2011, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IGB_H_DEFINED_ +#define _IGB_H_DEFINED_ + +/* Tunables */ + +/* + * IGB_TXD: Maximum number of Transmit Descriptors + * + * This value is the number of transmit descriptors allocated by the driver. + * Increasing this value allows the driver to queue more transmits. Each + * descriptor is 16 bytes. + * Since TDLEN should be multiple of 128bytes, the number of transmit + * desscriptors should meet the following condition. + * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 + */ +#define IGB_MIN_TXD 256 +#define IGB_DEFAULT_TXD 1024 +#define IGB_MAX_TXD 4096 + +/* + * IGB_RXD: Maximum number of Transmit Descriptors + * + * This value is the number of receive descriptors allocated by the driver. + * Increasing this value allows the driver to buffer more incoming packets. + * Each descriptor is 16 bytes. A receive buffer is also allocated for each + * descriptor. The maximum MTU size is 16110. + * Since TDLEN should be multiple of 128bytes, the number of transmit + * desscriptors should meet the following condition. + * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 + */ +#define IGB_MIN_RXD 256 +#define IGB_DEFAULT_RXD 1024 +#define IGB_MAX_RXD 4096 + +/* + * IGB_TIDV - Transmit Interrupt Delay Value + * Valid Range: 0-65535 (0=off) + * Default Value: 64 + * This value delays the generation of transmit interrupts in units of + * 1.024 microseconds. Transmit interrupt reduction can improve CPU + * efficiency if properly tuned for specific network traffic. If the + * system is reporting dropped transmits, this value may be set too high + * causing the driver to run out of available transmit descriptors. + */ +#define IGB_TIDV 64 + +/* + * IGB_TADV - Transmit Absolute Interrupt Delay Value + * Valid Range: 0-65535 (0=off) + * Default Value: 64 + * This value, in units of 1.024 microseconds, limits the delay in which a + * transmit interrupt is generated. Useful only if IGB_TIDV is non-zero, + * this value ensures that an interrupt is generated after the initial + * packet is sent on the wire within the set amount of time. Proper tuning, + * along with IGB_TIDV, may improve traffic throughput in specific + * network conditions. + */ +#define IGB_TADV 64 + +/* + * IGB_RDTR - Receive Interrupt Delay Timer (Packet Timer) + * Valid Range: 0-65535 (0=off) + * Default Value: 0 + * This value delays the generation of receive interrupts in units of 1.024 + * microseconds. Receive interrupt reduction can improve CPU efficiency if + * properly tuned for specific network traffic. Increasing this value adds + * extra latency to frame reception and can end up decreasing the throughput + * of TCP traffic. If the system is reporting dropped receives, this value + * may be set too high, causing the driver to run out of available receive + * descriptors. 
+ * + * CAUTION: When setting IGB_RDTR to a value other than 0, adapters + * may hang (stop transmitting) under certain network conditions. + * If this occurs a WATCHDOG message is logged in the system + * event log. In addition, the controller is automatically reset, + * restoring the network connection. To eliminate the potential + * for the hang ensure that IGB_RDTR is set to 0. + */ +#define IGB_RDTR 0 + +/* + * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544) + * Valid Range: 0-65535 (0=off) + * Default Value: 64 + * This value, in units of 1.024 microseconds, limits the delay in which a + * receive interrupt is generated. Useful only if IGB_RDTR is non-zero, + * this value ensures that an interrupt is generated after the initial + * packet is received within the set amount of time. Proper tuning, + * along with IGB_RDTR, may improve traffic throughput in specific network + * conditions. + */ +#define IGB_RADV 64 + +/* + * This parameter controls the duration of transmit watchdog timer. + */ +#define IGB_WATCHDOG (10 * hz) + +/* + * This parameter controls when the driver calls the routine to reclaim + * transmit descriptors. Cleaning earlier seems a win. + */ +#define IGB_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 2) + +/* + * This parameter controls whether or not autonegotation is enabled. + * 0 - Disable autonegotiation + * 1 - Enable autonegotiation + */ +#define DO_AUTO_NEG 1 + +/* + * This parameter control whether or not the driver will wait for + * autonegotiation to complete. + * 1 - Wait for autonegotiation to complete + * 0 - Don't wait for autonegotiation to complete + */ +#define WAIT_FOR_AUTO_NEG_DEFAULT 0 + +/* Tunables -- End */ + +#define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ + ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ + ADVERTISE_1000_FULL) + +#define AUTO_ALL_MODES 0 + +/* PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default + +/* + * Micellaneous constants + */ +#define IGB_VENDOR_ID 0x8086 + +#define IGB_JUMBO_PBA 0x00000028 +#define IGB_DEFAULT_PBA 0x00000030 +#define IGB_SMARTSPEED_DOWNSHIFT 3 +#define IGB_SMARTSPEED_MAX 15 +#define IGB_MAX_LOOP 10 + +#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8) +#define IGB_RX_HTHRESH 8 +#define IGB_RX_WTHRESH 1 + +#define IGB_TX_PTHRESH 8 +#define IGB_TX_HTHRESH 1 +#define IGB_TX_WTHRESH ((hw->mac.type != e1000_82575 && \ + adapter->msix_mem) ? 1 : 16) + +#define MAX_NUM_MULTICAST_ADDRESSES 128 +#define PCI_ANY_ID (~0U) +#define ETHER_ALIGN 2 +#define IGB_TX_BUFFER_SIZE ((uint32_t) 1514) +#define IGB_FC_PAUSE_TIME 0x0680 +#define IGB_EEPROM_APME 0x400; +#define IGB_QUEUE_IDLE 0 +#define IGB_QUEUE_WORKING 1 +#define IGB_QUEUE_HUNG 2 + +/* + * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be + * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will + * also optimize cache line size effect. H/W supports up to cache line size 128. 
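 *
 * With 16-byte descriptors the 128-byte TDLEN/RDLEN granularity means a
 * ring must contain a multiple of 8 descriptors; the IGB_MIN/DEFAULT/MAX
 * values above (256/1024/4096) all satisfy this, e.g. the default ring of
 * 1024 descriptors occupies 1024 * 16 = 16384 bytes, a multiple of 128.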
+ */ +#define IGB_DBA_ALIGN 128 + +#define SPEED_MODE_BIT (1<<21) /* On PCI-E MACs only */ + +/* PCI Config defines */ +#define IGB_MSIX_BAR 3 + +/* Defines for printing debug information */ +#define DEBUG_INIT 0 +#define DEBUG_IOCTL 0 +#define DEBUG_HW 0 + +#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") +#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) +#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) +#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") +#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) +#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) +#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") +#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) +#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) + +#define IGB_MAX_SCATTER 64 +#define IGB_VFTA_SIZE 128 +#define IGB_BR_SIZE 4096 /* ring buf size */ +#define IGB_TSO_SIZE (65535 + sizeof(struct ether_vlan_header)) +#define IGB_TSO_SEG_SIZE 4096 /* Max dma segment size */ +#define IGB_HDR_BUF 128 +#define IGB_PKTTYPE_MASK 0x0000FFF0 +#define ETH_ZLEN 60 +#define ETH_ADDR_LEN 6 + +/* Offload bits in mbuf flag */ +#if __FreeBSD_version >= 800000 +#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) +#else +#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) +#endif + +/* Define the starting Interrupt rate per Queue */ +#define IGB_INTS_PER_SEC 8000 +#define IGB_DEFAULT_ITR ((1000000/IGB_INTS_PER_SEC) << 2) + +#define IGB_LINK_ITR 2000 + +/* Precision Time Sync (IEEE 1588) defines */ +#define ETHERTYPE_IEEE1588 0x88F7 +#define PICOSECS_PER_TICK 20833 +#define TSYNC_PORT 319 /* UDP port for the protocol */ + +/* + * Bus dma allocation structure used by + * e1000_dma_malloc and e1000_dma_free. + */ +struct igb_dma_alloc { + bus_addr_t dma_paddr; + caddr_t dma_vaddr; + bus_dma_tag_t dma_tag; + bus_dmamap_t dma_map; + bus_dma_segment_t dma_seg; + int dma_nseg; +}; + + +/* +** Driver queue struct: this is the interrupt container +** for the associated tx and rx ring. +*/ +struct igb_queue { + struct adapter *adapter; + u32 msix; /* This queue's MSIX vector */ + u32 eims; /* This queue's EIMS bit */ + u32 eitr_setting; + struct resource *res; + void *tag; + struct tx_ring *txr; + struct rx_ring *rxr; + struct task que_task; + struct taskqueue *tq; + u64 irqs; +}; + +/* + * Transmit ring: one per queue + */ +struct tx_ring { + struct adapter *adapter; + u32 me; + struct mtx tx_mtx; + char mtx_name[16]; + struct igb_dma_alloc txdma; + struct e1000_tx_desc *tx_base; + u32 next_avail_desc; + u32 next_to_clean; + volatile u16 tx_avail; + struct igb_tx_buffer *tx_buffers; +#if __FreeBSD_version >= 800000 + struct buf_ring *br; +#endif + bus_dma_tag_t txtag; + + u32 bytes; + u32 packets; + + int queue_status; + int watchdog_time; + int tdt; + int tdh; + u64 no_desc_avail; + u64 tx_packets; +}; + +/* + * Receive ring: one per queue + */ +struct rx_ring { + struct adapter *adapter; + u32 me; + struct igb_dma_alloc rxdma; + union e1000_adv_rx_desc *rx_base; + struct lro_ctrl lro; + bool lro_enabled; + bool hdr_split; + bool discard; + struct mtx rx_mtx; + char mtx_name[16]; + u32 next_to_refresh; + u32 next_to_check; + struct igb_rx_buf *rx_buffers; + bus_dma_tag_t htag; /* dma tag for rx head */ + bus_dma_tag_t ptag; /* dma tag for rx packet */ + /* + * First/last mbuf pointers, for + * collecting multisegment RX packets. 
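	 * fmp points at the head of the frame being assembled and lmp at
	 * its current tail; the receive completion path appends each
	 * further descriptor's mbuf to lmp and only hands the finished
	 * chain (starting at fmp) to the stack once the end-of-packet
	 * descriptor is seen.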
+ */ + struct mbuf *fmp; + struct mbuf *lmp; + + u32 bytes; + u32 packets; + int rdt; + int rdh; + + /* Soft stats */ + u64 rx_split_packets; + u64 rx_discarded; + u64 rx_packets; + u64 rx_bytes; +}; + +struct adapter { + struct ifnet *ifp; + struct e1000_hw hw; + + struct e1000_osdep osdep; + struct device *dev; + struct cdev *led_dev; + + struct resource *pci_mem; + struct resource *msix_mem; + struct resource *res; + void *tag; + u32 que_mask; + + int linkvec; + int link_mask; + struct task link_task; + int link_irq; + + struct ifmedia media; + struct callout timer; + int msix; /* total vectors allocated */ + int if_flags; + int max_frame_size; + int min_frame_size; + int pause_frames; + struct mtx core_mtx; + int igb_insert_vlan_header; + u16 num_queues; + u16 vf_ifp; /* a VF interface */ + + eventhandler_tag vlan_attach; + eventhandler_tag vlan_detach; + u32 num_vlans; + + /* Management and WOL features */ + int wol; + int has_manage; + + /* + ** Shadow VFTA table, this is needed because + ** the real vlan filter table gets cleared during + ** a soft reset and the driver needs to be able + ** to repopulate it. + */ + u32 shadow_vfta[IGB_VFTA_SIZE]; + + /* Info about the interface */ + u8 link_active; + u16 link_speed; + u16 link_duplex; + u32 smartspeed; + u32 dma_coalesce; + + /* Interface queues */ + struct igb_queue *queues; + + /* + * Transmit rings + */ + struct tx_ring *tx_rings; + u16 num_tx_desc; + + /* Multicast array pointer */ + u8 *mta; + + /* + * Receive rings + */ + struct rx_ring *rx_rings; + bool rx_hdr_split; + u16 num_rx_desc; + int rx_process_limit; + u32 rx_mbuf_sz; + u32 rx_mask; + + /* Misc stats maintained by the driver */ + unsigned long dropped_pkts; + unsigned long mbuf_defrag_failed; + unsigned long mbuf_header_failed; + unsigned long mbuf_packet_failed; + unsigned long no_tx_map_avail; + unsigned long no_tx_dma_setup; + unsigned long watchdog_events; + unsigned long rx_overruns; + unsigned long device_control; + unsigned long rx_control; + unsigned long int_mask; + unsigned long eint_mask; + unsigned long packet_buf_alloc_rx; + unsigned long packet_buf_alloc_tx; + + boolean_t in_detach; + +#ifdef IGB_IEEE1588 + /* IEEE 1588 precision time support */ + struct cyclecounter cycles; + struct nettimer clock; + struct nettime_compare compare; + struct hwtstamp_ctrl hwtstamp; +#endif + + void *stats; +}; + +/* ****************************************************************************** + * vendor_info_array + * + * This array contains the list of Subvendor/Subdevice IDs on which the driver + * should load. 
+ * + * ******************************************************************************/ +typedef struct _igb_vendor_info_t { + unsigned int vendor_id; + unsigned int device_id; + unsigned int subvendor_id; + unsigned int subdevice_id; + unsigned int index; +} igb_vendor_info_t; + + +struct igb_tx_buffer { + int next_eop; /* Index of the desc to watch */ + struct mbuf *m_head; + bus_dmamap_t map; /* bus_dma map for packet */ +}; + +struct igb_rx_buf { + struct mbuf *m_head; + struct mbuf *m_pack; + bus_dmamap_t hmap; /* bus_dma map for header */ + bus_dmamap_t pmap; /* bus_dma map for packet */ +}; + +/* +** Find the number of unrefreshed RX descriptors +*/ +static inline u16 +igb_rx_unrefreshed(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + + if (rxr->next_to_check > rxr->next_to_refresh) + return (rxr->next_to_check - rxr->next_to_refresh - 1); + else + return ((adapter->num_rx_desc + rxr->next_to_check) - + rxr->next_to_refresh - 1); +} + +#define IGB_CORE_LOCK_INIT(_sc, _name) \ + mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF) +#define IGB_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) +#define IGB_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) +#define IGB_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) +#define IGB_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) + +#define IGB_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) +#define IGB_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) +#define IGB_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) +#define IGB_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) +#define IGB_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) + +#define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) +#define IGB_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) +#define IGB_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) +#define IGB_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_mtx, MA_OWNED) + +#define UPDATE_VF_REG(reg, last, cur) \ +{ \ + u32 new = E1000_READ_REG(hw, reg); \ + if (new < last) \ + cur += 0x100000000LL; \ + last = new; \ + cur &= 0xFFFFFFFF00000000LL; \ + cur |= new; \ +} + +#if __FreeBSD_version < 800504 +static __inline int +drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br) +{ +#ifdef ALTQ + if (ALTQ_IS_ENABLED(&ifp->if_snd)) + return (1); +#endif + return (!buf_ring_empty(br)); +} +#endif + +#endif /* _IGB_H_DEFINED_ */ + + diff --git a/lib/librte_pmd_ixgbe/Makefile b/lib/librte_pmd_ixgbe/Makefile new file mode 100644 index 0000000000..1fa9defa56 --- /dev/null +++ b/lib/librte_pmd_ixgbe/Makefile @@ -0,0 +1,65 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_ixgbe.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_common.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_82598.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_82599.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_x540.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_phy.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_api.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_mbx.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c + + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net lib/librte_malloc + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_pmd_ixgbe/ixgbe/README b/lib/librte_pmd_ixgbe/ixgbe/README new file mode 100644 index 0000000000..d0e7bdb253 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/README @@ -0,0 +1,70 @@ +.. + BSD LICENSE + + Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + version: DPDK.L.1.2.3-3 + +Intel® IXGBE driver +=================== + +This directory contains code from the Intel® Network Adapter Driver for PCI-E +10 Gigabit Network Connections under FreeBSD, version 2.4.4, dated 10/25/2011. +This code is available from +`http://downloadmirror.intel.com/14688/eng/ixgbe-2.4.4.tar.gz` + +This driver is valid for the product(s) listed below + +* Intel® 10 Gigabit AF DA Dual Port Server Adapter +* Intel® 10 Gigabit AT Server Adapter +* Intel® 10 Gigabit AT2 Server Adapter +* Intel® 10 Gigabit CX4 Dual Port Server Adapter +* Intel® 10 Gigabit XF LR Server Adapter +* Intel® 10 Gigabit XF SR Dual Port Server Adapter +* Intel® 10 Gigabit XF SR Server Adapter +* Intel® 82598 10 Gigabit Ethernet Controller +* Intel® 82599 10 Gigabit Ethernet Controller +* Intel® Ethernet Controller X540-AT2 +* Intel® Ethernet Server Adapter X520 Series +* Intel® Ethernet Server Adapter X520-T2 + +Updating driver +=============== + +The following modifications have been made to this code to integrate it with the +Intel® DPDK: + + +ixgbe_osdep.h +------------- + +The OS dependency layer has been extensively modified to support the drivers in +the Intel® DPDK environment. It is expected that these files will not need to be +changed on updating the driver. diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe.c new file mode 100644 index 0000000000..1ec23c9d5f --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe.c @@ -0,0 +1,5442 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifdef HAVE_KERNEL_OPTION_HEADERS +#include "opt_inet.h" +#include "opt_inet6.h" +#endif + +#include "ixgbe.h" + +/********************************************************************* + * Set this to one to display debug statistics + *********************************************************************/ +int ixgbe_display_debug_stats = 0; + +/********************************************************************* + * Driver version + *********************************************************************/ +char ixgbe_driver_version[] = "2.4.4"; + +/********************************************************************* + * PCI Device ID Table + * + * Used by probe to select devices to load on + * Last field stores an index into ixgbe_strings + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } + *********************************************************************/ + +static ixgbe_vendor_info_t ixgbe_vendor_info_array[] = +{ + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0}, + /* required last entry */ + {0, 0, 0, 0, 0} +}; + +/********************************************************************* + * Table of branding strings + *********************************************************************/ + +static char *ixgbe_strings[] = { + "Intel(R) PRO/10GbE PCI-Express Network Driver" +}; + +/********************************************************************* + * Function prototypes + *********************************************************************/ +static int ixgbe_probe(device_t); 
+static int ixgbe_attach(device_t); +static int ixgbe_detach(device_t); +static int ixgbe_shutdown(device_t); +static void ixgbe_start(struct ifnet *); +static void ixgbe_start_locked(struct tx_ring *, struct ifnet *); +#if __FreeBSD_version >= 800000 +static int ixgbe_mq_start(struct ifnet *, struct mbuf *); +static int ixgbe_mq_start_locked(struct ifnet *, + struct tx_ring *, struct mbuf *); +static void ixgbe_qflush(struct ifnet *); +#endif +static int ixgbe_ioctl(struct ifnet *, u_long, caddr_t); +static void ixgbe_init(void *); +static void ixgbe_init_locked(struct adapter *); +static void ixgbe_stop(void *); +static void ixgbe_media_status(struct ifnet *, struct ifmediareq *); +static int ixgbe_media_change(struct ifnet *); +static void ixgbe_identify_hardware(struct adapter *); +static int ixgbe_allocate_pci_resources(struct adapter *); +static int ixgbe_allocate_msix(struct adapter *); +static int ixgbe_allocate_legacy(struct adapter *); +static int ixgbe_allocate_queues(struct adapter *); +static int ixgbe_setup_msix(struct adapter *); +static void ixgbe_free_pci_resources(struct adapter *); +static void ixgbe_local_timer(void *); +static int ixgbe_setup_interface(device_t, struct adapter *); +static void ixgbe_config_link(struct adapter *); + +static int ixgbe_allocate_transmit_buffers(struct tx_ring *); +static int ixgbe_setup_transmit_structures(struct adapter *); +static void ixgbe_setup_transmit_ring(struct tx_ring *); +static void ixgbe_initialize_transmit_units(struct adapter *); +static void ixgbe_free_transmit_structures(struct adapter *); +static void ixgbe_free_transmit_buffers(struct tx_ring *); + +static int ixgbe_allocate_receive_buffers(struct rx_ring *); +static int ixgbe_setup_receive_structures(struct adapter *); +static int ixgbe_setup_receive_ring(struct rx_ring *); +static void ixgbe_initialize_receive_units(struct adapter *); +static void ixgbe_free_receive_structures(struct adapter *); +static void ixgbe_free_receive_buffers(struct rx_ring *); +static void ixgbe_setup_hw_rsc(struct rx_ring *); + +static void ixgbe_enable_intr(struct adapter *); +static void ixgbe_disable_intr(struct adapter *); +static void ixgbe_update_stats_counters(struct adapter *); +static bool ixgbe_txeof(struct tx_ring *); +static bool ixgbe_rxeof(struct ix_queue *, int); +static void ixgbe_rx_checksum(u32, struct mbuf *, u32); +static void ixgbe_set_promisc(struct adapter *); +static void ixgbe_set_multi(struct adapter *); +static void ixgbe_update_link_status(struct adapter *); +static void ixgbe_refresh_mbufs(struct rx_ring *, int); +static int ixgbe_xmit(struct tx_ring *, struct mbuf **); +static int ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS); +static int ixgbe_set_advertise(SYSCTL_HANDLER_ARGS); +static int ixgbe_dma_malloc(struct adapter *, bus_size_t, + struct ixgbe_dma_alloc *, int); +static void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *); +static void ixgbe_add_rx_process_limit(struct adapter *, const char *, + const char *, int *, int); +static bool ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *); +static bool ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *); +static void ixgbe_set_ivar(struct adapter *, u8, u8, s8); +static void ixgbe_configure_ivars(struct adapter *); +static u8 * ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); + +static void ixgbe_setup_vlan_hw_support(struct adapter *); +static void ixgbe_register_vlan(void *, struct ifnet *, u16); +static void ixgbe_unregister_vlan(void *, struct ifnet *, u16); + +static void 
ixgbe_add_hw_stats(struct adapter *adapter); + +static __inline void ixgbe_rx_discard(struct rx_ring *, int); +static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *, + struct mbuf *, u32); + +/* Support for pluggable optic modules */ +static bool ixgbe_sfp_probe(struct adapter *); +static void ixgbe_setup_optics(struct adapter *); + +/* Legacy (single vector interrupt handler */ +static void ixgbe_legacy_irq(void *); + +/* The MSI/X Interrupt handlers */ +static void ixgbe_msix_que(void *); +static void ixgbe_msix_link(void *); + +/* Deferred interrupt tasklets */ +static void ixgbe_handle_que(void *, int); +static void ixgbe_handle_link(void *, int); +static void ixgbe_handle_msf(void *, int); +static void ixgbe_handle_mod(void *, int); + +#ifdef IXGBE_FDIR +static void ixgbe_atr(struct tx_ring *, struct mbuf *); +static void ixgbe_reinit_fdir(void *, int); +#endif + +/********************************************************************* + * FreeBSD Device Interface Entry Points + *********************************************************************/ + +static device_method_t ixgbe_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, ixgbe_probe), + DEVMETHOD(device_attach, ixgbe_attach), + DEVMETHOD(device_detach, ixgbe_detach), + DEVMETHOD(device_shutdown, ixgbe_shutdown), + {0, 0} +}; + +static driver_t ixgbe_driver = { + "ix", ixgbe_methods, sizeof(struct adapter), +}; + +devclass_t ixgbe_devclass; +DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0); + +MODULE_DEPEND(ixgbe, pci, 1, 1, 1); +MODULE_DEPEND(ixgbe, ether, 1, 1, 1); + +/* +** TUNEABLE PARAMETERS: +*/ + +/* +** AIM: Adaptive Interrupt Moderation +** which means that the interrupt rate +** is varied over time based on the +** traffic for that interrupt vector +*/ +static int ixgbe_enable_aim = TRUE; +TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim); + +static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY); +TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate); + +/* How many packets rxeof tries to clean at a time */ +static int ixgbe_rx_process_limit = 128; +TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit); + +/* +** Smart speed setting, default to on +** this only works as a compile option +** right now as its during attach, set +** this to 'ixgbe_smart_speed_off' to +** disable. +*/ +static int ixgbe_smart_speed = ixgbe_smart_speed_on; + +/* + * MSIX should be the default for best performance, + * but this allows it to be forced off for testing. + */ +static int ixgbe_enable_msix = 1; +TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix); + +/* + * Header split: this causes the hardware to DMA + * the header into a separate mbuf from the payload, + * it can be a performance win in some workloads, but + * in others it actually hurts, its off by default. + */ +static bool ixgbe_header_split = FALSE; +TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split); + +/* + * Number of Queues, can be set to 0, + * it then autoconfigures based on the + * number of cpus with a max of 8. This + * can be overriden manually here. + */ +static int ixgbe_num_queues = 0; +TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues); + +/* +** Number of TX descriptors per ring, +** setting higher than RX as this seems +** the better performing choice. 
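**
** Like the other TUNABLE_INT knobs above, this can be overridden from
** loader.conf at boot time, e.g. (illustrative value only):
**	hw.ixgbe.txd=2048
** subject to the MIN_TXD/MAX_TXD and DBA_ALIGN checks in ixgbe_attach().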
+*/ +static int ixgbe_txd = PERFORM_TXD; +TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd); + +/* Number of RX descriptors per ring */ +static int ixgbe_rxd = PERFORM_RXD; +TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd); + +/* Keep running tab on them for sanity check */ +static int ixgbe_total_ports; + +#ifdef IXGBE_FDIR +/* +** For Flow Director: this is the +** number of TX packets we sample +** for the filter pool, this means +** every 20th packet will be probed. +** +** This feature can be disabled by +** setting this to 0. +*/ +static int atr_sample_rate = 20; +/* +** Flow Director actually 'steals' +** part of the packet buffer as its +** filter pool, this variable controls +** how much it uses: +** 0 = 64K, 1 = 128K, 2 = 256K +*/ +static int fdir_pballoc = 1; +#endif + +/********************************************************************* + * Device identification routine + * + * ixgbe_probe determines if the driver should be loaded on + * adapter based on PCI vendor/device id of the adapter. + * + * return BUS_PROBE_DEFAULT on success, positive on failure + *********************************************************************/ + +static int +ixgbe_probe(device_t dev) +{ + ixgbe_vendor_info_t *ent; + + u16 pci_vendor_id = 0; + u16 pci_device_id = 0; + u16 pci_subvendor_id = 0; + u16 pci_subdevice_id = 0; + char adapter_name[256]; + + INIT_DEBUGOUT("ixgbe_probe: begin"); + + pci_vendor_id = pci_get_vendor(dev); + if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID) + return (ENXIO); + + pci_device_id = pci_get_device(dev); + pci_subvendor_id = pci_get_subvendor(dev); + pci_subdevice_id = pci_get_subdevice(dev); + + ent = ixgbe_vendor_info_array; + while (ent->vendor_id != 0) { + if ((pci_vendor_id == ent->vendor_id) && + (pci_device_id == ent->device_id) && + + ((pci_subvendor_id == ent->subvendor_id) || + (ent->subvendor_id == 0)) && + + ((pci_subdevice_id == ent->subdevice_id) || + (ent->subdevice_id == 0))) { + sprintf(adapter_name, "%s, Version - %s", + ixgbe_strings[ent->index], + ixgbe_driver_version); + device_set_desc_copy(dev, adapter_name); + ++ixgbe_total_ports; + return (BUS_PROBE_DEFAULT); + } + ent++; + } + return (ENXIO); +} + +/********************************************************************* + * Device initialization routine + * + * The attach entry point is called when the driver is being loaded. + * This routine identifies the type of hardware, allocates all resources + * and initializes the hardware. 
+ * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +ixgbe_attach(device_t dev) +{ + struct adapter *adapter; + struct ixgbe_hw *hw; + int error = 0; + u16 csum; + u32 ctrl_ext; + + INIT_DEBUGOUT("ixgbe_attach: begin"); + + if (resource_disabled("ixgbe", device_get_unit(dev))) { + device_printf(dev, "Disabled by device hint\n"); + return (ENXIO); + } + + /* Allocate, clear, and link in our adapter structure */ + adapter = device_get_softc(dev); + adapter->dev = adapter->osdep.dev = dev; + hw = &adapter->hw; + + /* Core Lock Init*/ + IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); + + /* SYSCTL APIs */ + + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW, + adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control"); + + SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW, + &ixgbe_enable_aim, 1, "Interrupt Moderation"); + + /* + ** Allow a kind of speed control by forcing the autoneg + ** advertised speed list to only a certain value, this + ** supports 1G on 82599 devices, and 100Mb on x540. + */ + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW, + adapter, 0, ixgbe_set_advertise, "I", "Link Speed"); + + + /* Set up the timer callout */ + callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); + + /* Determine hardware revision */ + ixgbe_identify_hardware(adapter); + + /* Do base PCI setup - map BAR0 */ + if (ixgbe_allocate_pci_resources(adapter)) { + device_printf(dev, "Allocation of PCI resources failed\n"); + error = ENXIO; + goto err_out; + } + + /* Do descriptor calc and sanity checks */ + if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || + ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) { + device_printf(dev, "TXD config issue, using default!\n"); + adapter->num_tx_desc = DEFAULT_TXD; + } else + adapter->num_tx_desc = ixgbe_txd; + + /* + ** With many RX rings it is easy to exceed the + ** system mbuf allocation. Tuning nmbclusters + ** can alleviate this. + */ + if (nmbclusters > 0 ) { + int s; + s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports; + if (s > nmbclusters) { + device_printf(dev, "RX Descriptors exceed " + "system mbuf max, using default instead!\n"); + ixgbe_rxd = DEFAULT_RXD; + } + } + + if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || + ixgbe_rxd < MIN_TXD || ixgbe_rxd > MAX_TXD) { + device_printf(dev, "RXD config issue, using default!\n"); + adapter->num_rx_desc = DEFAULT_RXD; + } else + adapter->num_rx_desc = ixgbe_rxd; + + /* Allocate our TX/RX Queues */ + if (ixgbe_allocate_queues(adapter)) { + error = ENOMEM; + goto err_out; + } + + /* Allocate multicast array memory. */ + adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS * + MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); + if (adapter->mta == NULL) { + device_printf(dev, "Can not allocate multicast setup array\n"); + error = ENOMEM; + goto err_late; + } + + /* Initialize the shared code */ + error = ixgbe_init_shared_code(hw); + if (error == IXGBE_ERR_SFP_NOT_PRESENT) { + /* + ** No optics in this port, set up + ** so the timer routine will probe + ** for later insertion. 
+ */ + adapter->sfp_probe = TRUE; + error = 0; + } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) { + device_printf(dev,"Unsupported SFP+ module detected!\n"); + error = EIO; + goto err_late; + } else if (error) { + device_printf(dev,"Unable to initialize the shared code\n"); + error = EIO; + goto err_late; + } + + /* Make sure we have a good EEPROM before we read from it */ + if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) { + device_printf(dev,"The EEPROM Checksum Is Not Valid\n"); + error = EIO; + goto err_late; + } + + /* Get Hardware Flow Control setting */ + hw->fc.requested_mode = ixgbe_fc_full; + adapter->fc = hw->fc.requested_mode; + hw->fc.pause_time = IXGBE_FC_PAUSE; + hw->fc.low_water = IXGBE_FC_LO; + hw->fc.high_water[0] = IXGBE_FC_HI; + hw->fc.send_xon = TRUE; + + error = ixgbe_init_hw(hw); + if (error == IXGBE_ERR_EEPROM_VERSION) { + device_printf(dev, "This device is a pre-production adapter/" + "LOM. Please be aware there may be issues associated " + "with your hardware.\n If you are experiencing problems " + "please contact your Intel or hardware representative " + "who provided you with this hardware.\n"); + } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) + device_printf(dev,"Unsupported SFP+ Module\n"); + + if (error) { + error = EIO; + device_printf(dev,"Hardware Initialization Failure\n"); + goto err_late; + } + + /* Detect and set physical type */ + ixgbe_setup_optics(adapter); + + if ((adapter->msix > 1) && (ixgbe_enable_msix)) + error = ixgbe_allocate_msix(adapter); + else + error = ixgbe_allocate_legacy(adapter); + if (error) + goto err_late; + + /* Setup OS specific network interface */ + if (ixgbe_setup_interface(dev, adapter) != 0) + goto err_late; + + /* Sysctl for limiting the amount of work done in the taskqueue */ + ixgbe_add_rx_process_limit(adapter, "rx_processing_limit", + "max number of rx packets to process", &adapter->rx_process_limit, + ixgbe_rx_process_limit); + + /* Initialize statistics */ + ixgbe_update_stats_counters(adapter); + + /* Register for VLAN events */ + adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, + ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); + adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, + ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); + + /* Print PCIE bus type/speed/width info */ + ixgbe_get_bus_info(hw); + device_printf(dev,"PCI Express Bus: Speed %s %s\n", + ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s": + (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"), + (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" : + (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" : + (hw->bus.width == ixgbe_bus_width_pcie_x1) ? 
"Width x1" : + ("Unknown")); + + if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) && + (hw->bus.speed == ixgbe_bus_speed_2500)) { + device_printf(dev, "PCI-Express bandwidth available" + " for this card\n is not sufficient for" + " optimal performance.\n"); + device_printf(dev, "For optimal performance a x8 " + "PCIE, or x4 PCIE 2 slot is required.\n"); + } + + /* let hardware know driver is loaded */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + + ixgbe_add_hw_stats(adapter); + + INIT_DEBUGOUT("ixgbe_attach: end"); + return (0); +err_late: + ixgbe_free_transmit_structures(adapter); + ixgbe_free_receive_structures(adapter); +err_out: + if (adapter->ifp != NULL) + if_free(adapter->ifp); + ixgbe_free_pci_resources(adapter); + free(adapter->mta, M_DEVBUF); + return (error); + +} + +/********************************************************************* + * Device removal routine + * + * The detach entry point is called when the driver is being removed. + * This routine stops the adapter and deallocates all the resources + * that were allocated for driver operation. + * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +ixgbe_detach(device_t dev) +{ + struct adapter *adapter = device_get_softc(dev); + struct ix_queue *que = adapter->queues; + u32 ctrl_ext; + + INIT_DEBUGOUT("ixgbe_detach: begin"); + + /* Make sure VLANS are not using driver */ + if (adapter->ifp->if_vlantrunk != NULL) { + device_printf(dev,"Vlan in use, detach first\n"); + return (EBUSY); + } + + IXGBE_CORE_LOCK(adapter); + ixgbe_stop(adapter); + IXGBE_CORE_UNLOCK(adapter); + + for (int i = 0; i < adapter->num_queues; i++, que++) { + if (que->tq) { + taskqueue_drain(que->tq, &que->que_task); + taskqueue_free(que->tq); + } + } + + /* Drain the Link queue */ + if (adapter->tq) { + taskqueue_drain(adapter->tq, &adapter->link_task); + taskqueue_drain(adapter->tq, &adapter->mod_task); + taskqueue_drain(adapter->tq, &adapter->msf_task); +#ifdef IXGBE_FDIR + taskqueue_drain(adapter->tq, &adapter->fdir_task); +#endif + taskqueue_free(adapter->tq); + } + + /* let hardware know driver is unloading */ + ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); + ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext); + + /* Unregister VLAN events */ + if (adapter->vlan_attach != NULL) + EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); + if (adapter->vlan_detach != NULL) + EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); + + ether_ifdetach(adapter->ifp); + callout_drain(&adapter->timer); + ixgbe_free_pci_resources(adapter); + bus_generic_detach(dev); + if_free(adapter->ifp); + + ixgbe_free_transmit_structures(adapter); + ixgbe_free_receive_structures(adapter); + free(adapter->mta, M_DEVBUF); + + IXGBE_CORE_LOCK_DESTROY(adapter); + return (0); +} + +/********************************************************************* + * + * Shutdown entry point + * + **********************************************************************/ + +static int +ixgbe_shutdown(device_t dev) +{ + struct adapter *adapter = device_get_softc(dev); + IXGBE_CORE_LOCK(adapter); + ixgbe_stop(adapter); + IXGBE_CORE_UNLOCK(adapter); + return (0); +} + + +/********************************************************************* + * Transmit entry point + * + * ixgbe_start is called by the stack to initiate a transmit. 
+ * The driver will remain in this routine as long as there are + * packets to transmit and transmit resources are available. + * In case resources are not available stack is notified and + * the packet is requeued. + **********************************************************************/ + +static void +ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp) +{ + struct mbuf *m_head; + struct adapter *adapter = txr->adapter; + + IXGBE_TX_LOCK_ASSERT(txr); + + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING) + return; + if (!adapter->link_active) + return; + + while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { + + IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); + if (m_head == NULL) + break; + + if (ixgbe_xmit(txr, &m_head)) { + if (m_head == NULL) + break; + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + IFQ_DRV_PREPEND(&ifp->if_snd, m_head); + break; + } + /* Send a copy of the frame to the BPF listener */ + ETHER_BPF_MTAP(ifp, m_head); + + /* Set watchdog on */ + txr->watchdog_time = ticks; + txr->queue_status = IXGBE_QUEUE_WORKING; + + } + return; +} + +/* + * Legacy TX start - called by the stack, this + * always uses the first tx ring, and should + * not be used with multiqueue tx enabled. + */ +static void +ixgbe_start(struct ifnet *ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct tx_ring *txr = adapter->tx_rings; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXGBE_TX_LOCK(txr); + ixgbe_start_locked(txr, ifp); + IXGBE_TX_UNLOCK(txr); + } + return; +} + +#if __FreeBSD_version >= 800000 +/* +** Multiqueue Transmit driver +** +*/ +static int +ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m) +{ + struct adapter *adapter = ifp->if_softc; + struct ix_queue *que; + struct tx_ring *txr; + int i = 0, err = 0; + + /* Which queue to use */ + if ((m->m_flags & M_FLOWID) != 0) + i = m->m_pkthdr.flowid % adapter->num_queues; + + txr = &adapter->tx_rings[i]; + que = &adapter->queues[i]; + + if (IXGBE_TX_TRYLOCK(txr)) { + err = ixgbe_mq_start_locked(ifp, txr, m); + IXGBE_TX_UNLOCK(txr); + } else { + err = drbr_enqueue(ifp, txr->br, m); + taskqueue_enqueue(que->tq, &que->que_task); + } + + return (err); +} + +static int +ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m) +{ + struct adapter *adapter = txr->adapter; + struct mbuf *next; + int enqueued, err = 0; + + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING || adapter->link_active == 0) { + if (m != NULL) + err = drbr_enqueue(ifp, txr->br, m); + return (err); + } + + enqueued = 0; + if (m == NULL) { + next = drbr_dequeue(ifp, txr->br); + } else if (drbr_needs_enqueue(ifp, txr->br)) { + if ((err = drbr_enqueue(ifp, txr->br, m)) != 0) + return (err); + next = drbr_dequeue(ifp, txr->br); + } else + next = m; + + /* Process the queue */ + while (next != NULL) { + if ((err = ixgbe_xmit(txr, &next)) != 0) { + if (next != NULL) + err = drbr_enqueue(ifp, txr->br, next); + break; + } + enqueued++; + drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags); + /* Send a copy of the frame to the BPF listener */ + ETHER_BPF_MTAP(ifp, next); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + break; + if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) + ixgbe_txeof(txr); + if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) { + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } + next = drbr_dequeue(ifp, txr->br); + } + + if (enqueued > 0) { + /* Set watchdog on */ + txr->queue_status = IXGBE_QUEUE_WORKING; + txr->watchdog_time = ticks; + } + + return (err); +} + +/* +** Flush all ring 
buffers +*/ +static void +ixgbe_qflush(struct ifnet *ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct tx_ring *txr = adapter->tx_rings; + struct mbuf *m; + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + IXGBE_TX_LOCK(txr); + while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) + m_freem(m); + IXGBE_TX_UNLOCK(txr); + } + if_qflush(ifp); +} +#endif /* __FreeBSD_version >= 800000 */ + +/********************************************************************* + * Ioctl entry point + * + * ixgbe_ioctl is called when the user wants to configure the + * interface. + * + * return 0 on success, positive on failure + **********************************************************************/ + +static int +ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data) +{ + struct adapter *adapter = ifp->if_softc; + struct ifreq *ifr = (struct ifreq *) data; +#if defined(INET) || defined(INET6) + struct ifaddr *ifa = (struct ifaddr *)data; + bool avoid_reset = FALSE; +#endif + int error = 0; + + switch (command) { + + case SIOCSIFADDR: +#ifdef INET + if (ifa->ifa_addr->sa_family == AF_INET) + avoid_reset = TRUE; +#endif +#ifdef INET6 + if (ifa->ifa_addr->sa_family == AF_INET6) + avoid_reset = TRUE; +#endif +#if defined(INET) || defined(INET6) + /* + ** Calling init results in link renegotiation, + ** so we avoid doing it when possible. + */ + if (avoid_reset) { + ifp->if_flags |= IFF_UP; + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) + ixgbe_init(adapter); + if (!(ifp->if_flags & IFF_NOARP)) + arp_ifinit(ifp, ifa); + } else + error = ether_ioctl(ifp, command, data); + break; +#endif + case SIOCSIFMTU: + IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); + if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) { + error = EINVAL; + } else { + IXGBE_CORE_LOCK(adapter); + ifp->if_mtu = ifr->ifr_mtu; + adapter->max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + ixgbe_init_locked(adapter); + IXGBE_CORE_UNLOCK(adapter); + } + break; + case SIOCSIFFLAGS: + IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); + IXGBE_CORE_LOCK(adapter); + if (ifp->if_flags & IFF_UP) { + if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { + if ((ifp->if_flags ^ adapter->if_flags) & + (IFF_PROMISC | IFF_ALLMULTI)) { + ixgbe_set_promisc(adapter); + } + } else + ixgbe_init_locked(adapter); + } else + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ixgbe_stop(adapter); + adapter->if_flags = ifp->if_flags; + IXGBE_CORE_UNLOCK(adapter); + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXGBE_CORE_LOCK(adapter); + ixgbe_disable_intr(adapter); + ixgbe_set_multi(adapter); + ixgbe_enable_intr(adapter); + IXGBE_CORE_UNLOCK(adapter); + } + break; + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); + error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); + break; + case SIOCSIFCAP: + { + int mask = ifr->ifr_reqcap ^ ifp->if_capenable; + IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); + if (mask & IFCAP_HWCSUM) + ifp->if_capenable ^= IFCAP_HWCSUM; + if (mask & IFCAP_TSO4) + ifp->if_capenable ^= IFCAP_TSO4; + if (mask & IFCAP_LRO) + ifp->if_capenable ^= IFCAP_LRO; + if (mask & IFCAP_VLAN_HWTAGGING) + ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; + if (mask & IFCAP_VLAN_HWFILTER) + ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; + if (mask & IFCAP_VLAN_HWTSO) + ifp->if_capenable ^= IFCAP_VLAN_HWTSO; + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + 
IXGBE_CORE_LOCK(adapter); + ixgbe_init_locked(adapter); + IXGBE_CORE_UNLOCK(adapter); + } + VLAN_CAPABILITIES(ifp); + break; + } + + default: + IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command); + error = ether_ioctl(ifp, command, data); + break; + } + + return (error); +} + +/********************************************************************* + * Init entry point + * + * This routine is used in two ways. It is used by the stack as + * init entry point in network interface structure. It is also used + * by the driver as a hw/sw initialization routine to get to a + * consistent state. + * + * return 0 on success, positive on failure + **********************************************************************/ +#define IXGBE_MHADD_MFS_SHIFT 16 + +static void +ixgbe_init_locked(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + device_t dev = adapter->dev; + struct ixgbe_hw *hw = &adapter->hw; + u32 k, txdctl, mhadd, gpie; + u32 rxdctl, rxctrl; + + mtx_assert(&adapter->core_mtx, MA_OWNED); + INIT_DEBUGOUT("ixgbe_init: begin"); + hw->adapter_stopped = FALSE; + ixgbe_stop_adapter(hw); + callout_stop(&adapter->timer); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); + + /* Get the latest mac address, User can use a LAA */ + bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr, + IXGBE_ETH_LENGTH_OF_ADDRESS); + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1); + hw->addr_ctrl.rar_used_count = 1; + + /* Set the various hardware offload abilities */ + ifp->if_hwassist = 0; + if (ifp->if_capenable & IFCAP_TSO4) + ifp->if_hwassist |= CSUM_TSO; + if (ifp->if_capenable & IFCAP_TXCSUM) { + ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); +#if __FreeBSD_version >= 800000 + if (hw->mac.type != ixgbe_mac_82598EB) + ifp->if_hwassist |= CSUM_SCTP; +#endif + } + + /* Prepare transmit descriptors and buffers */ + if (ixgbe_setup_transmit_structures(adapter)) { + device_printf(dev,"Could not setup transmit structures\n"); + ixgbe_stop(adapter); + return; + } + + ixgbe_init_hw(hw); + ixgbe_initialize_transmit_units(adapter); + + /* Setup Multicast table */ + ixgbe_set_multi(adapter); + + /* + ** Determine the correct mbuf pool + ** for doing jumbo/headersplit + */ + if (adapter->max_frame_size <= 2048) + adapter->rx_mbuf_sz = MCLBYTES; + else if (adapter->max_frame_size <= 4096) + adapter->rx_mbuf_sz = MJUMPAGESIZE; + else if (adapter->max_frame_size <= 9216) + adapter->rx_mbuf_sz = MJUM9BYTES; + else + adapter->rx_mbuf_sz = MJUM16BYTES; + + /* Prepare receive descriptors and buffers */ + if (ixgbe_setup_receive_structures(adapter)) { + device_printf(dev,"Could not setup receive structures\n"); + ixgbe_stop(adapter); + return; + } + + /* Configure RX settings */ + ixgbe_initialize_receive_units(adapter); + + gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); + + /* Enable Fan Failure Interrupt */ + gpie |= IXGBE_SDP1_GPIEN; + + /* Add for Module detection */ + if (hw->mac.type == ixgbe_mac_82599EB) + gpie |= IXGBE_SDP2_GPIEN; + + /* Thermal Failure Detection */ + if (hw->mac.type == ixgbe_mac_X540) + gpie |= IXGBE_SDP0_GPIEN; + + if (adapter->msix > 1) { + /* Enable Enhanced MSIX mode */ + gpie |= IXGBE_GPIE_MSIX_MODE; + gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT | + IXGBE_GPIE_OCD; + } + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* Set MTU size */ + if (ifp->if_mtu > ETHERMTU) { + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, 
IXGBE_MHADD, mhadd); + } + + /* Now enable all the queues */ + + for (int i = 0; i < adapter->num_queues; i++) { + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + txdctl |= IXGBE_TXDCTL_ENABLE; + /* Set WTHRESH to 8, burst writeback */ + txdctl |= (8 << 16); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl); + } + + for (int i = 0; i < adapter->num_queues; i++) { + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + if (hw->mac.type == ixgbe_mac_82598EB) { + /* + ** PTHRESH = 21 + ** HTHRESH = 4 + ** WTHRESH = 8 + */ + rxdctl &= ~0x3FFFFF; + rxdctl |= 0x080420; + } + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl); + for (k = 0; k < 10; k++) { + if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) & + IXGBE_RXDCTL_ENABLE) + break; + else + msec_delay(1); + } + wmb(); + IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1); + } + + /* Set up VLAN support and filter */ + ixgbe_setup_vlan_hw_support(adapter); + + /* Enable Receive engine */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; + rxctrl |= IXGBE_RXCTRL_RXEN; + ixgbe_enable_rx_dma(hw, rxctrl); + + callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); + + /* Set up MSI/X routing */ + if (ixgbe_enable_msix) { + ixgbe_configure_ivars(adapter); + /* Set up auto-mask */ + if (hw->mac.type == ixgbe_mac_82598EB) + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + else { + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); + } + } else { /* Simple settings for Legacy/MSI */ + ixgbe_set_ivar(adapter, 0, 0, 0); + ixgbe_set_ivar(adapter, 0, 0, 1); + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + } + +#ifdef IXGBE_FDIR + /* Init Flow director */ + if (hw->mac.type != ixgbe_mac_82598EB) { + u32 hdrm = 64 << fdir_pballoc; + + hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL); + ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc); + } +#endif + + /* + ** Check on any SFP devices that + ** need to be kick-started + */ + if (hw->phy.type == ixgbe_phy_none) { + int err = hw->phy.ops.identify(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { + device_printf(dev, + "Unsupported SFP+ module type was detected.\n"); + return; + } + } + + /* Set moderation on the Link interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR); + + /* Config/Enable Link */ + ixgbe_config_link(adapter); + + /* And now turn on interrupts */ + ixgbe_enable_intr(adapter); + + /* Now inform the stack we're ready */ + ifp->if_drv_flags |= IFF_DRV_RUNNING; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + + return; +} + +static void +ixgbe_init(void *arg) +{ + struct adapter *adapter = arg; + + IXGBE_CORE_LOCK(adapter); + ixgbe_init_locked(adapter); + IXGBE_CORE_UNLOCK(adapter); + return; +} + + +/* +** +** MSIX Interrupt Handlers and Tasklets +** +*/ + +static inline void +ixgbe_enable_queue(struct adapter *adapter, u32 vector) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 queue = (u64)(1 << vector); + u32 mask; + + if (hw->mac.type == ixgbe_mac_82598EB) { + mask = (IXGBE_EIMS_RTX_QUEUE & queue); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); + } else { + mask = (queue & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + mask = (queue >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + } +} + +static inline void +ixgbe_disable_queue(struct adapter *adapter, u32 vector) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 queue = (u64)(1 << vector); + u32 mask; 
+ + if (hw->mac.type == ixgbe_mac_82598EB) { + mask = (IXGBE_EIMS_RTX_QUEUE & queue); + IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); + } else { + mask = (queue & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); + mask = (queue >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); + } +} + +static inline void +ixgbe_rearm_queues(struct adapter *adapter, u64 queues) +{ + u32 mask; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + mask = (IXGBE_EIMS_RTX_QUEUE & queues); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); + } else { + mask = (queues & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); + mask = (queues >> 32); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); + } +} + + +static void +ixgbe_handle_que(void *context, int pending) +{ + struct ix_queue *que = context; + struct adapter *adapter = que->adapter; + struct tx_ring *txr = que->txr; + struct ifnet *ifp = adapter->ifp; + bool more; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + more = ixgbe_rxeof(que, adapter->rx_process_limit); + IXGBE_TX_LOCK(txr); + ixgbe_txeof(txr); +#if __FreeBSD_version >= 800000 + if (!drbr_empty(ifp, txr->br)) + ixgbe_mq_start_locked(ifp, txr, NULL); +#else + if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + ixgbe_start_locked(txr, ifp); +#endif + IXGBE_TX_UNLOCK(txr); + if (more) { + taskqueue_enqueue(que->tq, &que->que_task); + return; + } + } + + /* Reenable this interrupt */ + ixgbe_enable_queue(adapter, que->msix); + return; +} + + +/********************************************************************* + * + * Legacy Interrupt Service routine + * + **********************************************************************/ + +static void +ixgbe_legacy_irq(void *arg) +{ + struct ix_queue *que = arg; + struct adapter *adapter = que->adapter; + struct ixgbe_hw *hw = &adapter->hw; + struct tx_ring *txr = adapter->tx_rings; + bool more_tx, more_rx; + u32 reg_eicr, loop = MAX_LOOP; + + + reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + + ++que->irqs; + if (reg_eicr == 0) { + ixgbe_enable_intr(adapter); + return; + } + + more_rx = ixgbe_rxeof(que, adapter->rx_process_limit); + + IXGBE_TX_LOCK(txr); + do { + more_tx = ixgbe_txeof(txr); + } while (loop-- && more_tx); + IXGBE_TX_UNLOCK(txr); + + if (more_rx || more_tx) + taskqueue_enqueue(que->tq, &que->que_task); + + /* Check for fan failure */ + if ((hw->phy.media_type == ixgbe_media_type_copper) && + (reg_eicr & IXGBE_EICR_GPI_SDP1)) { + device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! " + "REPLACE IMMEDIATELY!!\n"); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1); + } + + /* Link status change */ + if (reg_eicr & IXGBE_EICR_LSC) + taskqueue_enqueue(adapter->tq, &adapter->link_task); + + ixgbe_enable_intr(adapter); + return; +} + + +/********************************************************************* + * + * MSIX Queue Interrupt Service routine + * + **********************************************************************/ +void +ixgbe_msix_que(void *arg) +{ + struct ix_queue *que = arg; + struct adapter *adapter = que->adapter; + struct tx_ring *txr = que->txr; + struct rx_ring *rxr = que->rxr; + bool more_tx, more_rx; + u32 newitr = 0; + + ++que->irqs; + + more_rx = ixgbe_rxeof(que, adapter->rx_process_limit); + + IXGBE_TX_LOCK(txr); + more_tx = ixgbe_txeof(txr); + /* + ** Make certain that if the stack + ** has anything queued the task gets + ** scheduled to handle it. 
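ixgbe_enable_queue, ixgbe_disable_queue and ixgbe_rearm_queues above all split a 64-bit queue bitmap across a pair of 32-bit EIMS/EIMC/EICS registers on non-82598 hardware. A self-contained sketch of that split, with the register writes replaced by printf:

#include <stdio.h>
#include <stdint.h>

/* Split a 64-bit queue bitmap into low/high 32-bit register values,
 * writing each half only when it has bits set. */
static void
write_queue_mask(uint64_t queues)
{
	uint32_t lo = (uint32_t)(queues & 0xFFFFFFFF);
	uint32_t hi = (uint32_t)(queues >> 32);

	if (lo)
		printf("reg[0] <- 0x%08x\n", lo);
	if (hi)
		printf("reg[1] <- 0x%08x\n", hi);
}

int
main(void)
{
	write_queue_mask((uint64_t)1 << 3);    /* queue 3: low half only   */
	write_queue_mask((uint64_t)1 << 40);   /* queue 40: high half only */
	return (0);
}

(The sketch forms the bitmap as (uint64_t)1 << n; promoting to 64 bits before the shift matters if a vector number of 32 or more is ever used.)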
+ */ +#if __FreeBSD_version < 800000 + if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd)) +#else + if (!drbr_empty(adapter->ifp, txr->br)) +#endif + more_tx = 1; + IXGBE_TX_UNLOCK(txr); + + /* Do AIM now? */ + + if (ixgbe_enable_aim == FALSE) + goto no_calc; + /* + ** Do Adaptive Interrupt Moderation: + ** - Write out last calculated setting + ** - Calculate based on average size over + ** the last interval. + */ + if (que->eitr_setting) + IXGBE_WRITE_REG(&adapter->hw, + IXGBE_EITR(que->msix), que->eitr_setting); + + que->eitr_setting = 0; + + /* Idle, do nothing */ + if ((txr->bytes == 0) && (rxr->bytes == 0)) + goto no_calc; + + if ((txr->bytes) && (txr->packets)) + newitr = txr->bytes/txr->packets; + if ((rxr->bytes) && (rxr->packets)) + newitr = max(newitr, + (rxr->bytes / rxr->packets)); + newitr += 24; /* account for hardware frame, crc */ + + /* set an upper boundary */ + newitr = min(newitr, 3000); + + /* Be nice to the mid range */ + if ((newitr > 300) && (newitr < 1200)) + newitr = (newitr / 3); + else + newitr = (newitr / 2); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + newitr |= newitr << 16; + else + newitr |= IXGBE_EITR_CNT_WDIS; + + /* save for next interrupt */ + que->eitr_setting = newitr; + + /* Reset state */ + txr->bytes = 0; + txr->packets = 0; + rxr->bytes = 0; + rxr->packets = 0; + +no_calc: + if (more_tx || more_rx) + taskqueue_enqueue(que->tq, &que->que_task); + else /* Reenable this interrupt */ + ixgbe_enable_queue(adapter, que->msix); + return; +} + + +static void +ixgbe_msix_link(void *arg) +{ + struct adapter *adapter = arg; + struct ixgbe_hw *hw = &adapter->hw; + u32 reg_eicr; + + ++adapter->link_irq; + + /* First get the cause */ + reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS); + /* Clear interrupt with write */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr); + + /* Link status change */ + if (reg_eicr & IXGBE_EICR_LSC) + taskqueue_enqueue(adapter->tq, &adapter->link_task); + + if (adapter->hw.mac.type != ixgbe_mac_82598EB) { +#ifdef IXGBE_FDIR + if (reg_eicr & IXGBE_EICR_FLOW_DIR) { + /* This is probably overkill :) */ + if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1)) + return; + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR); + /* Turn off the interface */ + adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + taskqueue_enqueue(adapter->tq, &adapter->fdir_task); + } else +#endif + if (reg_eicr & IXGBE_EICR_ECC) { + device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! " + "Please Reboot!!\n"); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); + } else + + if (reg_eicr & IXGBE_EICR_GPI_SDP1) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + taskqueue_enqueue(adapter->tq, &adapter->msf_task); + } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) { + /* Clear the interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); + taskqueue_enqueue(adapter->tq, &adapter->mod_task); + } + } + + /* Check for fan failure */ + if ((hw->device_id == IXGBE_DEV_ID_82598AT) && + (reg_eicr & IXGBE_EICR_GPI_SDP1)) { + device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! " + "REPLACE IMMEDIATELY!!\n"); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + } + + /* Check for over temp condition */ + if ((hw->mac.type == ixgbe_mac_X540) && + (reg_eicr & IXGBE_EICR_GPI_SDP0)) { + device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! 
" + "PHY IS SHUT DOWN!!\n"); + device_printf(adapter->dev, "System shutdown required\n"); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0); + } + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); + return; +} + +/********************************************************************* + * + * Media Ioctl callback + * + * This routine is called whenever the user queries the status of + * the interface using ifconfig. + * + **********************************************************************/ +static void +ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) +{ + struct adapter *adapter = ifp->if_softc; + + INIT_DEBUGOUT("ixgbe_media_status: begin"); + IXGBE_CORE_LOCK(adapter); + ixgbe_update_link_status(adapter); + + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; + + if (!adapter->link_active) { + IXGBE_CORE_UNLOCK(adapter); + return; + } + + ifmr->ifm_status |= IFM_ACTIVE; + + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_100_FULL: + ifmr->ifm_active |= IFM_100_TX | IFM_FDX; + break; + case IXGBE_LINK_SPEED_1GB_FULL: + ifmr->ifm_active |= IFM_1000_T | IFM_FDX; + break; + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= adapter->optics | IFM_FDX; + break; + } + + IXGBE_CORE_UNLOCK(adapter); + + return; +} + +/********************************************************************* + * + * Media Ioctl callback + * + * This routine is called when the user changes speed/duplex using + * media/mediopt option with ifconfig. + * + **********************************************************************/ +static int +ixgbe_media_change(struct ifnet * ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct ifmedia *ifm = &adapter->media; + + INIT_DEBUGOUT("ixgbe_media_change: begin"); + + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) + return (EINVAL); + + switch (IFM_SUBTYPE(ifm->ifm_media)) { + case IFM_AUTO: + adapter->hw.phy.autoneg_advertised = + IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL | + IXGBE_LINK_SPEED_10GB_FULL; + break; + default: + device_printf(adapter->dev, "Only auto media type\n"); + return (EINVAL); + } + + return (0); +} + +/********************************************************************* + * + * This routine maps the mbufs to tx descriptors, allowing the + * TX engine to transmit the packets. + * - return 0 on success, positive on failure + * + **********************************************************************/ + +static int +ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp) +{ + struct adapter *adapter = txr->adapter; + u32 olinfo_status = 0, cmd_type_len; + u32 paylen = 0; + int i, j, error, nsegs; + int first, last = 0; + struct mbuf *m_head; + bus_dma_segment_t segs[adapter->num_segs]; + bus_dmamap_t map; + struct ixgbe_tx_buf *txbuf; + union ixgbe_adv_tx_desc *txd = NULL; + + m_head = *m_headp; + + /* Basic descriptor defines */ + cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); + + if (m_head->m_flags & M_VLANTAG) + cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; + + /* + * Important to capture the first descriptor + * used because it will contain the index of + * the one we tell the hardware to report back + */ + first = txr->next_avail_desc; + txbuf = &txr->tx_buffers[first]; + map = txbuf->map; + + /* + * Map the packet for DMA. 
+ */ + error = bus_dmamap_load_mbuf_sg(txr->txtag, map, + *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); + + if (error == EFBIG) { + struct mbuf *m; + + m = m_defrag(*m_headp, M_DONTWAIT); + if (m == NULL) { + adapter->mbuf_defrag_failed++; + m_freem(*m_headp); + *m_headp = NULL; + return (ENOBUFS); + } + *m_headp = m; + + /* Try it again */ + error = bus_dmamap_load_mbuf_sg(txr->txtag, map, + *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); + + if (error == ENOMEM) { + adapter->no_tx_dma_setup++; + return (error); + } else if (error != 0) { + adapter->no_tx_dma_setup++; + m_freem(*m_headp); + *m_headp = NULL; + return (error); + } + } else if (error == ENOMEM) { + adapter->no_tx_dma_setup++; + return (error); + } else if (error != 0) { + adapter->no_tx_dma_setup++; + m_freem(*m_headp); + *m_headp = NULL; + return (error); + } + + /* Make certain there are enough descriptors */ + if (nsegs > txr->tx_avail - 2) { + txr->no_desc_avail++; + error = ENOBUFS; + goto xmit_fail; + } + m_head = *m_headp; + + /* + ** Set up the appropriate offload context + ** this becomes the first descriptor of + ** a packet. + */ + if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { + if (ixgbe_tso_setup(txr, m_head, &paylen)) { + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; + olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; + olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; + ++adapter->tso_tx; + } else + return (ENXIO); + } else if (ixgbe_tx_ctx_setup(txr, m_head)) + olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; + +#ifdef IXGBE_IEEE1588 + /* This is changing soon to an mtag detection */ + if (we detect this mbuf has a TSTAMP mtag) + cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP; +#endif + +#ifdef IXGBE_FDIR + /* Do the flow director magic */ + if ((txr->atr_sample) && (!adapter->fdir_reinit)) { + ++txr->atr_count; + if (txr->atr_count >= atr_sample_rate) { + ixgbe_atr(txr, m_head); + txr->atr_count = 0; + } + } +#endif + /* Record payload length */ + if (paylen == 0) + olinfo_status |= m_head->m_pkthdr.len << + IXGBE_ADVTXD_PAYLEN_SHIFT; + + i = txr->next_avail_desc; + for (j = 0; j < nsegs; j++) { + bus_size_t seglen; + bus_addr_t segaddr; + + txbuf = &txr->tx_buffers[i]; + txd = &txr->tx_base[i]; + seglen = segs[j].ds_len; + segaddr = htole64(segs[j].ds_addr); + + txd->read.buffer_addr = segaddr; + txd->read.cmd_type_len = htole32(txr->txd_cmd | + cmd_type_len |seglen); + txd->read.olinfo_status = htole32(olinfo_status); + last = i; /* descriptor that will get completion IRQ */ + + if (++i == adapter->num_tx_desc) + i = 0; + + txbuf->m_head = NULL; + txbuf->eop_index = -1; + } + + txd->read.cmd_type_len |= + htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS); + txr->tx_avail -= nsegs; + txr->next_avail_desc = i; + + txbuf->m_head = m_head; + /* Swap the dma map between the first and last descriptor */ + txr->tx_buffers[first].map = txbuf->map; + txbuf->map = map; + bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); + + /* Set the index of the descriptor that will be marked done */ + txbuf = &txr->tx_buffers[first]; + txbuf->eop_index = last; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + /* + * Advance the Transmit Descriptor Tail (Tdt), this tells the + * hardware that this frame is available to transmit. 
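The segment loop in ixgbe_xmit above walks the descriptor ring with a wrapping index and remembers the last slot written, which is the one that later gets the EOP/RS bits and the completion index. A stripped-down sketch of that index bookkeeping, with DMA segments reduced to plain lengths:

#include <stdio.h>

#define RING_SIZE 8   /* illustrative ring size */

/* Place nsegs segments into the ring starting at *next, wrapping as needed.
 * Returns the index of the last descriptor written; *next is advanced past
 * the frame. */
static int
fill_descriptors(int *next, int nsegs, const int *seg_len)
{
	int i = *next, last = i;

	for (int j = 0; j < nsegs; j++) {
		printf("desc[%d] <- segment %d (len %d)\n", i, j, seg_len[j]);
		last = i;
		if (++i == RING_SIZE)
			i = 0;
	}
	*next = i;
	return (last);
}

int
main(void)
{
	int next = 6;
	int segs[] = { 1500, 1500, 512 };
	int last = fill_descriptors(&next, 3, segs);

	printf("last = %d, next_avail = %d\n", last, next);  /* last = 0, next = 1 */
	return (0);
}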
+ */ + ++txr->total_packets; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i); + + return (0); + +xmit_fail: + bus_dmamap_unload(txr->txtag, txbuf->map); + return (error); + +} + +static void +ixgbe_set_promisc(struct adapter *adapter) +{ + u_int32_t reg_rctl; + struct ifnet *ifp = adapter->ifp; + + reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); + reg_rctl &= (~IXGBE_FCTRL_UPE); + reg_rctl &= (~IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); + + if (ifp->if_flags & IFF_PROMISC) { + reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); + } else if (ifp->if_flags & IFF_ALLMULTI) { + reg_rctl |= IXGBE_FCTRL_MPE; + reg_rctl &= ~IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl); + } + return; +} + + +/********************************************************************* + * Multicast Update + * + * This routine is called whenever multicast address list is updated. + * + **********************************************************************/ +#define IXGBE_RAR_ENTRIES 16 + +static void +ixgbe_set_multi(struct adapter *adapter) +{ + u32 fctrl; + u8 *mta; + u8 *update_ptr; + struct ifmultiaddr *ifma; + int mcnt = 0; + struct ifnet *ifp = adapter->ifp; + + IOCTL_DEBUGOUT("ixgbe_set_multi: begin"); + + mta = adapter->mta; + bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS * + MAX_NUM_MULTICAST_ADDRESSES); + + fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + if (ifp->if_flags & IFF_PROMISC) + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + else if (ifp->if_flags & IFF_ALLMULTI) { + fctrl |= IXGBE_FCTRL_MPE; + fctrl &= ~IXGBE_FCTRL_UPE; + } else + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + + IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); + +#if __FreeBSD_version < 800000 + IF_ADDR_LOCK(ifp); +#else + if_maddr_rlock(ifp); +#endif + TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), + &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], + IXGBE_ETH_LENGTH_OF_ADDRESS); + mcnt++; + } +#if __FreeBSD_version < 800000 + IF_ADDR_UNLOCK(ifp); +#else + if_maddr_runlock(ifp); +#endif + + update_ptr = mta; + ixgbe_update_mc_addr_list(&adapter->hw, + update_ptr, mcnt, ixgbe_mc_array_itr, TRUE); + + return; +} + +/* + * This is an iterator function now needed by the multicast + * shared code. It simply feeds the shared code routine the + * addresses in the array of ixgbe_set_multi() one by one. + */ +static u8 * +ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) +{ + u8 *addr = *update_ptr; + u8 *newptr; + *vmdq = 0; + + newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; + *update_ptr = newptr; + return addr; +} + + +/********************************************************************* + * Timer routine + * + * This routine checks for link status,updates statistics, + * and runs the watchdog check. 
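ixgbe_set_multi above packs the multicast addresses into one flat byte array and hands the shared code an iterator, ixgbe_mc_array_itr, that returns one six-byte address per call. The same iterator pattern in a small self-contained form:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Return the current address and advance the cursor by one entry. */
static const uint8_t *
mc_array_itr(const uint8_t **cursor)
{
	const uint8_t *addr = *cursor;

	*cursor += ETH_ALEN;
	return (addr);
}

int
main(void)
{
	/* Two addresses packed back to back, as the setup routine builds them. */
	uint8_t mta[2 * ETH_ALEN] = {
		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,
		0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb,
	};
	const uint8_t *cursor = mta;

	for (int i = 0; i < 2; i++) {
		const uint8_t *a = mc_array_itr(&cursor);
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		    a[0], a[1], a[2], a[3], a[4], a[5]);
	}
	return (0);
}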
+ * + **********************************************************************/ + +static void +ixgbe_local_timer(void *arg) +{ + struct adapter *adapter = arg; + device_t dev = adapter->dev; + struct tx_ring *txr = adapter->tx_rings; + + mtx_assert(&adapter->core_mtx, MA_OWNED); + + /* Check for pluggable optics */ + if (adapter->sfp_probe) + if (!ixgbe_sfp_probe(adapter)) + goto out; /* Nothing to do */ + + ixgbe_update_link_status(adapter); + ixgbe_update_stats_counters(adapter); + + /* + * If the interface has been paused + * then don't do the watchdog check + */ + if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) + goto out; + + /* + ** Check status on the TX queues for a hang + */ + for (int i = 0; i < adapter->num_queues; i++, txr++) + if (txr->queue_status == IXGBE_QUEUE_HUNG) + goto hung; + +out: + ixgbe_rearm_queues(adapter, adapter->que_mask); + callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter); + return; + +hung: + device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); + device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, + IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)), + IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me))); + device_printf(dev,"TX(%d) desc avail = %d," + "Next TX to Clean = %d\n", + txr->me, txr->tx_avail, txr->next_to_clean); + adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + adapter->watchdog_events++; + ixgbe_init_locked(adapter); +} + +/* +** Note: this routine updates the OS on the link state +** the real check of the hardware only happens with +** a link interrupt. +*/ +static void +ixgbe_update_link_status(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + struct tx_ring *txr = adapter->tx_rings; + device_t dev = adapter->dev; + + + if (adapter->link_up){ + if (adapter->link_active == FALSE) { + if (bootverbose) + device_printf(dev,"Link is up %d Gbps %s \n", + ((adapter->link_speed == 128)? 10:1), + "Full Duplex"); + adapter->link_active = TRUE; + if_link_state_change(ifp, LINK_STATE_UP); + } + } else { /* Link down */ + if (adapter->link_active == TRUE) { + if (bootverbose) + device_printf(dev,"Link is Down\n"); + if_link_state_change(ifp, LINK_STATE_DOWN); + adapter->link_active = FALSE; + for (int i = 0; i < adapter->num_queues; + i++, txr++) + txr->queue_status = IXGBE_QUEUE_IDLE; + } + } + + return; +} + + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC and deallocates TX/RX buffers. + * + **********************************************************************/ + +static void +ixgbe_stop(void *arg) +{ + struct ifnet *ifp; + struct adapter *adapter = arg; + struct ixgbe_hw *hw = &adapter->hw; + ifp = adapter->ifp; + + mtx_assert(&adapter->core_mtx, MA_OWNED); + + INIT_DEBUGOUT("ixgbe_stop: begin\n"); + ixgbe_disable_intr(adapter); + + /* Tell the stack that the interface is no longer active */ + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + + ixgbe_reset_hw(hw); + hw->adapter_stopped = FALSE; + ixgbe_stop_adapter(hw); + /* Turn off the laser */ + if (hw->phy.multispeed_fiber) + ixgbe_disable_tx_laser(hw); + callout_stop(&adapter->timer); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); + + return; +} + + +/********************************************************************* + * + * Determine hardware revision. 
+ * + **********************************************************************/ +static void +ixgbe_identify_hardware(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ixgbe_hw *hw = &adapter->hw; + + /* Save off the information about this board */ + hw->vendor_id = pci_get_vendor(dev); + hw->device_id = pci_get_device(dev); + hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); + hw->subsystem_vendor_id = + pci_read_config(dev, PCIR_SUBVEND_0, 2); + hw->subsystem_device_id = + pci_read_config(dev, PCIR_SUBDEV_0, 2); + + /* We need this here to set the num_segs below */ + ixgbe_set_mac_type(hw); + + /* Pick up the 82599 and VF settings */ + if (hw->mac.type != ixgbe_mac_82598EB) { + hw->phy.smart_speed = ixgbe_smart_speed; + adapter->num_segs = IXGBE_82599_SCATTER; + } else + adapter->num_segs = IXGBE_82598_SCATTER; + + return; +} + +/********************************************************************* + * + * Determine optic type + * + **********************************************************************/ +static void +ixgbe_setup_optics(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + int layer; + + layer = ixgbe_get_supported_physical_layer(hw); + + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T) { + adapter->optics = IFM_10G_T; + return; + } + + if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T) { + adapter->optics = IFM_1000_T; + return; + } + + if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR | + IXGBE_PHYSICAL_LAYER_10GBASE_LRM)) { + adapter->optics = IFM_10G_LR; + return; + } + + if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR) { + adapter->optics = IFM_10G_SR; + return; + } + + if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU) { + adapter->optics = IFM_10G_TWINAX; + return; + } + + if (layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | + IXGBE_PHYSICAL_LAYER_10GBASE_CX4)) { + adapter->optics = IFM_10G_CX4; + return; + } + + /* If we get here just set the default */ + adapter->optics = IFM_ETHER | IFM_AUTO; + return; +} + +/********************************************************************* + * + * Setup the Legacy or MSI Interrupt handler + * + **********************************************************************/ +static int +ixgbe_allocate_legacy(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ix_queue *que = adapter->queues; + int error, rid = 0; + + /* MSI RID at 1 */ + if (adapter->msix == 1) + rid = 1; + + /* We allocate a single interrupt resource */ + adapter->res = bus_alloc_resource_any(dev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); + if (adapter->res == NULL) { + device_printf(dev, "Unable to allocate bus resource: " + "interrupt\n"); + return (ENXIO); + } + + /* + * Try allocating a fast interrupt and the associated deferred + * processing contexts. 
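ixgbe_setup_optics above maps the supported-physical-layer bitmask to a single media subtype by testing the more specific bits first and falling back to auto. A compact sketch of that first-match mapping, with made-up bit and media names rather than the real IXGBE_PHYSICAL_LAYER_*/IFM_* identifiers:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the PHY-layer bits. */
#define PHY_10GBASE_T   0x01
#define PHY_1000BASE_T  0x02
#define PHY_10GBASE_LR  0x04
#define PHY_10GBASE_SR  0x08
#define PHY_SFP_CU      0x10

static const char *
layer_to_media(uint32_t layer)
{
	if (layer & PHY_10GBASE_T)
		return ("10G-T");
	if (layer & PHY_1000BASE_T)
		return ("1000-T");
	if (layer & PHY_10GBASE_LR)
		return ("10G-LR");
	if (layer & PHY_10GBASE_SR)
		return ("10G-SR");
	if (layer & PHY_SFP_CU)
		return ("10G-Twinax");
	return ("auto");                    /* default when nothing matches */
}

int
main(void)
{
	printf("%s\n", layer_to_media(PHY_10GBASE_SR));
	printf("%s\n", layer_to_media(0));
	return (0);
}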
+ */ + TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); + que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, + taskqueue_thread_enqueue, &que->tq); + taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq", + device_get_nameunit(adapter->dev)); + + /* Tasklets for Link, SFP and Multispeed Fiber */ + TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); + TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); + TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); +#ifdef IXGBE_FDIR + TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); +#endif + adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT, + taskqueue_thread_enqueue, &adapter->tq); + taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq", + device_get_nameunit(adapter->dev)); + + if ((error = bus_setup_intr(dev, adapter->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq, + que, &adapter->tag)) != 0) { + device_printf(dev, "Failed to register fast interrupt " + "handler: %d\n", error); + taskqueue_free(que->tq); + taskqueue_free(adapter->tq); + que->tq = NULL; + adapter->tq = NULL; + return (error); + } + /* For simplicity in the handlers */ + adapter->que_mask = IXGBE_EIMS_ENABLE_MASK; + + return (0); +} + + +/********************************************************************* + * + * Setup MSIX Interrupt resources and handlers + * + **********************************************************************/ +static int +ixgbe_allocate_msix(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ix_queue *que = adapter->queues; + int error, rid, vector = 0; + + for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { + rid = vector + 1; + que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_SHAREABLE | RF_ACTIVE); + if (que->res == NULL) { + device_printf(dev,"Unable to allocate" + " bus resource: que interrupt [%d]\n", vector); + return (ENXIO); + } + /* Set the handler function */ + error = bus_setup_intr(dev, que->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + ixgbe_msix_que, que, &que->tag); + if (error) { + que->res = NULL; + device_printf(dev, "Failed to register QUE handler"); + return (error); + } +#if __FreeBSD_version >= 800504 + bus_describe_intr(dev, que->res, que->tag, "que %d", i); +#endif + que->msix = vector; + adapter->que_mask |= (u64)(1 << que->msix); + /* + ** Bind the msix vector, and thus the + ** ring to the corresponding cpu. 
+ */ + if (adapter->num_queues > 1) + bus_bind_intr(dev, que->res, i); + + TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que); + que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT, + taskqueue_thread_enqueue, &que->tq); + taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", + device_get_nameunit(adapter->dev)); + } + + /* and Link */ + rid = vector + 1; + adapter->res = bus_alloc_resource_any(dev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); + if (!adapter->res) { + device_printf(dev,"Unable to allocate" + " bus resource: Link interrupt [%d]\n", rid); + return (ENXIO); + } + /* Set the link handler function */ + error = bus_setup_intr(dev, adapter->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + ixgbe_msix_link, adapter, &adapter->tag); + if (error) { + adapter->res = NULL; + device_printf(dev, "Failed to register LINK handler"); + return (error); + } +#if __FreeBSD_version >= 800504 + bus_describe_intr(dev, adapter->res, adapter->tag, "link"); +#endif + adapter->linkvec = vector; + /* Tasklets for Link, SFP and Multispeed Fiber */ + TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter); + TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter); + TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter); +#ifdef IXGBE_FDIR + TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter); +#endif + adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT, + taskqueue_thread_enqueue, &adapter->tq); + taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq", + device_get_nameunit(adapter->dev)); + + return (0); +} + +/* + * Setup Either MSI/X or MSI + */ +static int +ixgbe_setup_msix(struct adapter *adapter) +{ + device_t dev = adapter->dev; + int rid, want, queues, msgs; + + /* Override by tuneable */ + if (ixgbe_enable_msix == 0) + goto msi; + + /* First try MSI/X */ + rid = PCIR_BAR(MSIX_82598_BAR); + adapter->msix_mem = bus_alloc_resource_any(dev, + SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (!adapter->msix_mem) { + rid += 4; /* 82599 maps in higher BAR */ + adapter->msix_mem = bus_alloc_resource_any(dev, + SYS_RES_MEMORY, &rid, RF_ACTIVE); + } + if (!adapter->msix_mem) { + /* May not be enabled */ + device_printf(adapter->dev, + "Unable to map MSIX table \n"); + goto msi; + } + + msgs = pci_msix_count(dev); + if (msgs == 0) { /* system has msix disabled */ + bus_release_resource(dev, SYS_RES_MEMORY, + rid, adapter->msix_mem); + adapter->msix_mem = NULL; + goto msi; + } + + /* Figure out a reasonable auto config value */ + queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus; + + if (ixgbe_num_queues != 0) + queues = ixgbe_num_queues; + /* Set max queues to 8 when autoconfiguring */ + else if ((ixgbe_num_queues == 0) && (queues > 8)) + queues = 8; + + /* + ** Want one vector (RX/TX pair) per queue + ** plus an additional for Link. 
+ */ + want = queues + 1; + if (msgs >= want) + msgs = want; + else { + device_printf(adapter->dev, + "MSIX Configuration Problem, " + "%d vectors but %d queues wanted!\n", + msgs, want); + return (0); /* Will go to Legacy setup */ + } + if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) { + device_printf(adapter->dev, + "Using MSIX interrupts with %d vectors\n", msgs); + adapter->num_queues = queues; + return (msgs); + } +msi: + msgs = pci_msi_count(dev); + if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0) + device_printf(adapter->dev,"Using an MSI interrupt\n"); + else + device_printf(adapter->dev,"Using a Legacy interrupt\n"); + return (msgs); +} + + +static int +ixgbe_allocate_pci_resources(struct adapter *adapter) +{ + int rid; + device_t dev = adapter->dev; + + rid = PCIR_BAR(0); + adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); + + if (!(adapter->pci_mem)) { + device_printf(dev,"Unable to allocate bus resource: memory\n"); + return (ENXIO); + } + + adapter->osdep.mem_bus_space_tag = + rman_get_bustag(adapter->pci_mem); + adapter->osdep.mem_bus_space_handle = + rman_get_bushandle(adapter->pci_mem); + adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; + + /* Legacy defaults */ + adapter->num_queues = 1; + adapter->hw.back = &adapter->osdep; + + /* + ** Now setup MSI or MSI/X, should + ** return us the number of supported + ** vectors. (Will be 1 for MSI) + */ + adapter->msix = ixgbe_setup_msix(adapter); + return (0); +} + +static void +ixgbe_free_pci_resources(struct adapter * adapter) +{ + struct ix_queue *que = adapter->queues; + device_t dev = adapter->dev; + int rid, memrid; + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) + memrid = PCIR_BAR(MSIX_82598_BAR); + else + memrid = PCIR_BAR(MSIX_82599_BAR); + + /* + ** There is a slight possibility of a failure mode + ** in attach that will result in entering this function + ** before interrupt resources have been initialized, and + ** in that case we do not want to execute the loops below + ** We can detect this reliably by the state of the adapter + ** res pointer. + */ + if (adapter->res == NULL) + goto mem; + + /* + ** Release all msix queue resources: + */ + for (int i = 0; i < adapter->num_queues; i++, que++) { + rid = que->msix + 1; + if (que->tag != NULL) { + bus_teardown_intr(dev, que->res, que->tag); + que->tag = NULL; + } + if (que->res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); + } + + + /* Clean the Legacy or Link interrupt last */ + if (adapter->linkvec) /* we are doing MSIX */ + rid = adapter->linkvec + 1; + else + (adapter->msix != 0) ? (rid = 1):(rid = 0); + + if (adapter->tag != NULL) { + bus_teardown_intr(dev, adapter->res, adapter->tag); + adapter->tag = NULL; + } + if (adapter->res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); + +mem: + if (adapter->msix) + pci_release_msi(dev); + + if (adapter->msix_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + memrid, adapter->msix_mem); + + if (adapter->pci_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(0), adapter->pci_mem); + + return; +} + +/********************************************************************* + * + * Setup networking device structure and register an interface. 
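ixgbe_setup_msix above sizes the queue count from the smaller of the CPU count and the available MSI-X messages, caps the autoconfigured value at eight, honours an explicit tunable, and then asks for one extra vector for the link interrupt. A standalone sketch of that budgeting logic:

#include <stdio.h>

/* Returns the number of MSI-X vectors to request and stores the queue count
 * in *queues_out; 0 means fall back to MSI/legacy.  'forced' models the
 * num-queues tunable (0 = autoconfigure). */
static int
msix_budget(int ncpus, int msgs, int forced, int *queues_out)
{
	int queues, want;

	if (msgs <= 0)
		return (0);

	queues = (ncpus > msgs - 1) ? msgs - 1 : ncpus;
	if (forced != 0)
		queues = forced;
	else if (queues > 8)                /* cap when autoconfiguring */
		queues = 8;

	want = queues + 1;                  /* one vector per queue, plus link */
	if (msgs < want)
		return (0);                 /* not enough vectors: legacy path */

	*queues_out = queues;
	return (want);
}

int
main(void)
{
	int queues;
	int vectors = msix_budget(16, 10, 0, &queues);

	printf("queues=%d vectors=%d\n", queues, vectors);  /* queues=8 vectors=9 */
	return (0);
}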
+ * + **********************************************************************/ +static int +ixgbe_setup_interface(device_t dev, struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ifnet *ifp; + + INIT_DEBUGOUT("ixgbe_setup_interface: begin"); + + ifp = adapter->ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) { + device_printf(dev, "can not allocate ifnet structure\n"); + return (-1); + } + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_mtu = ETHERMTU; + ifp->if_baudrate = 1000000000; + ifp->if_init = ixgbe_init; + ifp->if_softc = adapter; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_ioctl = ixgbe_ioctl; + ifp->if_start = ixgbe_start; +#if __FreeBSD_version >= 800000 + ifp->if_transmit = ixgbe_mq_start; + ifp->if_qflush = ixgbe_qflush; +#endif + ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2; + + ether_ifattach(ifp, adapter->hw.mac.addr); + + adapter->max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + /* + * Tell the upper layer(s) we support long frames. + */ + ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); + + ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM; + ifp->if_capabilities |= IFCAP_JUMBO_MTU; + ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING + | IFCAP_VLAN_HWTSO + | IFCAP_VLAN_MTU; + ifp->if_capenable = ifp->if_capabilities; + + /* Don't enable LRO by default */ + ifp->if_capabilities |= IFCAP_LRO; + + /* + ** Don't turn this on by default, if vlans are + ** created on another pseudo device (eg. lagg) + ** then vlan events are not passed thru, breaking + ** operation, but with HW FILTER off it works. If + ** using vlans directly on the ixgbe driver you can + ** enable this and get full hardware tag filtering. + */ + ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; + + /* + * Specify the media types supported by this adapter and register + * callbacks to update media and link information + */ + ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change, + ixgbe_media_status); + ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL); + ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics); + if (hw->device_id == IXGBE_DEV_ID_82598AT) { + ifmedia_add(&adapter->media, + IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); + ifmedia_add(&adapter->media, + IFM_ETHER | IFM_1000_T, 0, NULL); + } + ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); + + return (0); +} + +static void +ixgbe_config_link(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 autoneg, err = 0; + bool sfp, negotiate; + + sfp = ixgbe_is_sfp(hw); + + if (sfp) { + if (hw->phy.multispeed_fiber) { + hw->mac.ops.setup_sfp(hw); + ixgbe_enable_tx_laser(hw); + taskqueue_enqueue(adapter->tq, &adapter->msf_task); + } else + taskqueue_enqueue(adapter->tq, &adapter->mod_task); + } else { + if (hw->mac.ops.check_link) + err = ixgbe_check_link(hw, &autoneg, + &adapter->link_up, FALSE); + if (err) + goto out; + autoneg = hw->phy.autoneg_advertised; + if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) + err = hw->mac.ops.get_link_capabilities(hw, + &autoneg, &negotiate); + if (err) + goto out; + if (hw->mac.ops.setup_link) + err = hw->mac.ops.setup_link(hw, autoneg, + negotiate, adapter->link_up); + } +out: + return; +} + +/******************************************************************** + * Manage DMA'able memory. 
+ *******************************************************************/ +static void +ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) +{ + if (error) + return; + *(bus_addr_t *) arg = segs->ds_addr; + return; +} + +static int +ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size, + struct ixgbe_dma_alloc *dma, int mapflags) +{ + device_t dev = adapter->dev; + int r; + + r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ + DBA_ALIGN, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + size, /* maxsize */ + 1, /* nsegments */ + size, /* maxsegsize */ + BUS_DMA_ALLOCNOW, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &dma->dma_tag); + if (r != 0) { + device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; " + "error %u\n", r); + goto fail_0; + } + r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, + BUS_DMA_NOWAIT, &dma->dma_map); + if (r != 0) { + device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; " + "error %u\n", r); + goto fail_1; + } + r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, + size, + ixgbe_dmamap_cb, + &dma->dma_paddr, + mapflags | BUS_DMA_NOWAIT); + if (r != 0) { + device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; " + "error %u\n", r); + goto fail_2; + } + dma->dma_size = size; + return (0); +fail_2: + bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); +fail_1: + bus_dma_tag_destroy(dma->dma_tag); +fail_0: + dma->dma_map = NULL; + dma->dma_tag = NULL; + return (r); +} + +static void +ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma) +{ + bus_dmamap_sync(dma->dma_tag, dma->dma_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(dma->dma_tag, dma->dma_map); + bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); + bus_dma_tag_destroy(dma->dma_tag); +} + + +/********************************************************************* + * + * Allocate memory for the transmit and receive rings, and then + * the descriptors associated with each, called only once at attach. 
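ixgbe_dma_malloc above layers three allocations (tag create, memory alloc, map load) and unwinds only the completed steps through the fail_2/fail_1/fail_0 labels. The same staged-cleanup idiom in a generic, self-contained form, with plain malloc standing in for the bus_dma calls:

#include <stdio.h>
#include <stdlib.h>

struct resource { void *a, *b, *c; };

/* Acquire three resources in order; on failure release only what was
 * already acquired, falling through the labels in reverse order. */
static int
resource_acquire(struct resource *r)
{
	r->a = malloc(64);
	if (r->a == NULL)
		goto fail_0;
	r->b = malloc(64);
	if (r->b == NULL)
		goto fail_1;
	r->c = malloc(64);
	if (r->c == NULL)
		goto fail_2;
	return (0);

fail_2:
	free(r->b);
fail_1:
	free(r->a);
fail_0:
	r->a = r->b = r->c = NULL;
	return (-1);
}

int
main(void)
{
	struct resource r;

	if (resource_acquire(&r) == 0) {
		printf("all three resources acquired\n");
		free(r.c);
		free(r.b);
		free(r.a);
	}
	return (0);
}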
+ * + **********************************************************************/ +static int +ixgbe_allocate_queues(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ix_queue *que; + struct tx_ring *txr; + struct rx_ring *rxr; + int rsize, tsize, error = IXGBE_SUCCESS; + int txconf = 0, rxconf = 0; + + /* First allocate the top level queue structs */ + if (!(adapter->queues = + (struct ix_queue *) malloc(sizeof(struct ix_queue) * + adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate queue memory\n"); + error = ENOMEM; + goto fail; + } + + /* First allocate the TX ring struct memory */ + if (!(adapter->tx_rings = + (struct tx_ring *) malloc(sizeof(struct tx_ring) * + adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate TX ring memory\n"); + error = ENOMEM; + goto tx_fail; + } + + /* Next allocate the RX */ + if (!(adapter->rx_rings = + (struct rx_ring *) malloc(sizeof(struct rx_ring) * + adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate RX ring memory\n"); + error = ENOMEM; + goto rx_fail; + } + + /* For the ring itself */ + tsize = roundup2(adapter->num_tx_desc * + sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN); + + /* + * Now set up the TX queues, txconf is needed to handle the + * possibility that things fail midcourse and we need to + * undo memory gracefully + */ + for (int i = 0; i < adapter->num_queues; i++, txconf++) { + /* Set up some basics */ + txr = &adapter->tx_rings[i]; + txr->adapter = adapter; + txr->me = i; + + /* Initialize the TX side lock */ + snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", + device_get_nameunit(dev), txr->me); + mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); + + if (ixgbe_dma_malloc(adapter, tsize, + &txr->txdma, BUS_DMA_NOWAIT)) { + device_printf(dev, + "Unable to allocate TX Descriptor memory\n"); + error = ENOMEM; + goto err_tx_desc; + } + txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr; + bzero((void *)txr->tx_base, tsize); + + /* Now allocate transmit buffers for the ring */ + if (ixgbe_allocate_transmit_buffers(txr)) { + device_printf(dev, + "Critical Failure setting up transmit buffers\n"); + error = ENOMEM; + goto err_tx_desc; + } +#if __FreeBSD_version >= 800000 + /* Allocate a buf ring */ + txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF, + M_WAITOK, &txr->tx_mtx); + if (txr->br == NULL) { + device_printf(dev, + "Critical Failure setting up buf ring\n"); + error = ENOMEM; + goto err_tx_desc; + } +#endif + } + + /* + * Next the RX queues... 
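+ * (mirroring the TX setup above: per-ring lock, descriptor DMA
+ * area, then the receive buffer structures)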
+ */ + rsize = roundup2(adapter->num_rx_desc * + sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); + for (int i = 0; i < adapter->num_queues; i++, rxconf++) { + rxr = &adapter->rx_rings[i]; + /* Set up some basics */ + rxr->adapter = adapter; + rxr->me = i; + + /* Initialize the RX side lock */ + snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", + device_get_nameunit(dev), rxr->me); + mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); + + if (ixgbe_dma_malloc(adapter, rsize, + &rxr->rxdma, BUS_DMA_NOWAIT)) { + device_printf(dev, + "Unable to allocate RxDescriptor memory\n"); + error = ENOMEM; + goto err_rx_desc; + } + rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr; + bzero((void *)rxr->rx_base, rsize); + + /* Allocate receive buffers for the ring*/ + if (ixgbe_allocate_receive_buffers(rxr)) { + device_printf(dev, + "Critical Failure setting up receive buffers\n"); + error = ENOMEM; + goto err_rx_desc; + } + } + + /* + ** Finally set up the queue holding structs + */ + for (int i = 0; i < adapter->num_queues; i++) { + que = &adapter->queues[i]; + que->adapter = adapter; + que->txr = &adapter->tx_rings[i]; + que->rxr = &adapter->rx_rings[i]; + } + + return (0); + +err_rx_desc: + for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) + ixgbe_dma_free(adapter, &rxr->rxdma); +err_tx_desc: + for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) + ixgbe_dma_free(adapter, &txr->txdma); + free(adapter->rx_rings, M_DEVBUF); +rx_fail: + free(adapter->tx_rings, M_DEVBUF); +tx_fail: + free(adapter->queues, M_DEVBUF); +fail: + return (error); +} + +/********************************************************************* + * + * Allocate memory for tx_buffer structures. The tx_buffer stores all + * the information needed to transmit a packet on the wire. This is + * called only once at attach, setup is done every reset. + * + **********************************************************************/ +static int +ixgbe_allocate_transmit_buffers(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + device_t dev = adapter->dev; + struct ixgbe_tx_buf *txbuf; + int error, i; + + /* + * Setup DMA descriptor areas. + */ + if ((error = bus_dma_tag_create(NULL, /* parent */ + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + IXGBE_TSO_SIZE, /* maxsize */ + adapter->num_segs, /* nsegments */ + PAGE_SIZE, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &txr->txtag))) { + device_printf(dev,"Unable to allocate TX DMA tag\n"); + goto fail; + } + + if (!(txr->tx_buffers = + (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) * + adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate tx_buffer memory\n"); + error = ENOMEM; + goto fail; + } + + /* Create the descriptor buffer dma maps */ + txbuf = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { + error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); + if (error != 0) { + device_printf(dev, "Unable to create TX DMA map\n"); + goto fail; + } + } + + return 0; +fail: + /* We free all, it handles case where we are in the middle */ + ixgbe_free_transmit_structures(adapter); + return (error); +} + +/********************************************************************* + * + * Initialize a transmit ring. 
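+ * Resets the avail/clean indices, releases any mbufs still mapped
+ * from a previous run, and marks every descriptor available again.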
+ * + **********************************************************************/ +static void +ixgbe_setup_transmit_ring(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct ixgbe_tx_buf *txbuf; + int i; + + /* Clear the old ring contents */ + IXGBE_TX_LOCK(txr); + bzero((void *)txr->tx_base, + (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); + /* Reset indices */ + txr->next_avail_desc = 0; + txr->next_to_clean = 0; + + /* Free any existing tx buffers. */ + txbuf = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { + if (txbuf->m_head != NULL) { + bus_dmamap_sync(txr->txtag, txbuf->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, txbuf->map); + m_freem(txbuf->m_head); + txbuf->m_head = NULL; + } + /* Clear the EOP index */ + txbuf->eop_index = -1; + } + +#ifdef IXGBE_FDIR + /* Set the rate at which we sample packets */ + if (adapter->hw.mac.type != ixgbe_mac_82598EB) + txr->atr_sample = atr_sample_rate; +#endif + + /* Set number of descriptors available */ + txr->tx_avail = adapter->num_tx_desc; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + IXGBE_TX_UNLOCK(txr); +} + +/********************************************************************* + * + * Initialize all transmit rings. + * + **********************************************************************/ +static int +ixgbe_setup_transmit_structures(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + + for (int i = 0; i < adapter->num_queues; i++, txr++) + ixgbe_setup_transmit_ring(txr); + + return (0); +} + +/********************************************************************* + * + * Enable transmit unit. + * + **********************************************************************/ +static void +ixgbe_initialize_transmit_units(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + struct ixgbe_hw *hw = &adapter->hw; + + /* Setup the Base and Length of the Tx Descriptor Ring */ + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + u64 tdba = txr->txdma.dma_paddr; + u32 txctrl; + + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), + (tdba & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), + adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc)); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0); + + /* Setup Transmit Descriptor Cmd Settings */ + txr->txd_cmd = IXGBE_TXD_CMD_IFCS; + txr->queue_status = IXGBE_QUEUE_IDLE; + + /* Disable Head Writeback */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + default: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + break; + } + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + default: + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl); + break; + } + + } + + if (hw->mac.type != ixgbe_mac_82598EB) { + u32 dmatxctl, rttdcs; + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + dmatxctl |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); + /* Disable arbiter to set MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, 
rttdcs); + IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + } + + return; +} + +/********************************************************************* + * + * Free all transmit rings. + * + **********************************************************************/ +static void +ixgbe_free_transmit_structures(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + IXGBE_TX_LOCK(txr); + ixgbe_free_transmit_buffers(txr); + ixgbe_dma_free(adapter, &txr->txdma); + IXGBE_TX_UNLOCK(txr); + IXGBE_TX_LOCK_DESTROY(txr); + } + free(adapter->tx_rings, M_DEVBUF); +} + +/********************************************************************* + * + * Free transmit ring related data structures. + * + **********************************************************************/ +static void +ixgbe_free_transmit_buffers(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct ixgbe_tx_buf *tx_buffer; + int i; + + INIT_DEBUGOUT("free_transmit_ring: begin"); + + if (txr->tx_buffers == NULL) + return; + + tx_buffer = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { + if (tx_buffer->m_head != NULL) { + bus_dmamap_sync(txr->txtag, tx_buffer->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + m_freem(tx_buffer->m_head); + tx_buffer->m_head = NULL; + if (tx_buffer->map != NULL) { + bus_dmamap_destroy(txr->txtag, + tx_buffer->map); + tx_buffer->map = NULL; + } + } else if (tx_buffer->map != NULL) { + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + bus_dmamap_destroy(txr->txtag, + tx_buffer->map); + tx_buffer->map = NULL; + } + } +#if __FreeBSD_version >= 800000 + if (txr->br != NULL) + buf_ring_free(txr->br, M_DEVBUF); +#endif + if (txr->tx_buffers != NULL) { + free(txr->tx_buffers, M_DEVBUF); + txr->tx_buffers = NULL; + } + if (txr->txtag != NULL) { + bus_dma_tag_destroy(txr->txtag); + txr->txtag = NULL; + } + return; +} + +/********************************************************************* + * + * Advanced Context Descriptor setup for VLAN or CSUM + * + **********************************************************************/ + +static boolean_t +ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp) +{ + struct adapter *adapter = txr->adapter; + struct ixgbe_adv_tx_context_desc *TXD; + struct ixgbe_tx_buf *tx_buffer; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + struct ether_vlan_header *eh; + struct ip *ip; + struct ip6_hdr *ip6; + int ehdrlen, ip_hlen = 0; + u16 etype; + u8 ipproto = 0; + bool offload = TRUE; + int ctxd = txr->next_avail_desc; + u16 vtag = 0; + + + if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) + offload = FALSE; + + tx_buffer = &txr->tx_buffers[ctxd]; + TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; + + /* + ** In advanced descriptors the vlan tag must + ** be placed into the descriptor itself. + */ + if (mp->m_flags & M_VLANTAG) { + vtag = htole16(mp->m_pkthdr.ether_vtag); + vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); + } else if (offload == FALSE) + return FALSE; + + /* + * Determine where frame payload starts. + * Jump over vlan headers if already present, + * helpful for QinQ too. 
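+ * (ehdrlen ends up as ETHER_HDR_LEN, or ETHER_HDR_LEN +
+ * ETHER_VLAN_ENCAP_LEN when an 802.1Q header is present)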
+ */ + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { + etype = ntohs(eh->evl_proto); + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + } else { + etype = ntohs(eh->evl_encap_proto); + ehdrlen = ETHER_HDR_LEN; + } + + /* Set the ether header length */ + vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; + + switch (etype) { + case ETHERTYPE_IP: + ip = (struct ip *)(mp->m_data + ehdrlen); + ip_hlen = ip->ip_hl << 2; + ipproto = ip->ip_p; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + break; + case ETHERTYPE_IPV6: + ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); + ip_hlen = sizeof(struct ip6_hdr); + ipproto = ip6->ip6_nxt; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; + break; + default: + offload = FALSE; + break; + } + + vlan_macip_lens |= ip_hlen; + type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + switch (ipproto) { + case IPPROTO_TCP: + if (mp->m_pkthdr.csum_flags & CSUM_TCP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + break; + + case IPPROTO_UDP: + if (mp->m_pkthdr.csum_flags & CSUM_UDP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; + break; + +#if __FreeBSD_version >= 800000 + case IPPROTO_SCTP: + if (mp->m_pkthdr.csum_flags & CSUM_SCTP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + break; +#endif + default: + offload = FALSE; + break; + } + + /* Now copy bits into descriptor */ + TXD->vlan_macip_lens |= htole32(vlan_macip_lens); + TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); + TXD->seqnum_seed = htole32(0); + TXD->mss_l4len_idx = htole32(0); + + tx_buffer->m_head = NULL; + tx_buffer->eop_index = -1; + + /* We've consumed the first desc, adjust counters */ + if (++ctxd == adapter->num_tx_desc) + ctxd = 0; + txr->next_avail_desc = ctxd; + --txr->tx_avail; + + return (offload); +} + +/********************************************************************** + * + * Setup work for hardware segmentation offload (TSO) on + * adapters using advanced tx descriptors + * + **********************************************************************/ +static boolean_t +ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen) +{ + struct adapter *adapter = txr->adapter; + struct ixgbe_adv_tx_context_desc *TXD; + struct ixgbe_tx_buf *tx_buffer; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + u32 mss_l4len_idx = 0; + u16 vtag = 0; + int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen; + struct ether_vlan_header *eh; + struct ip *ip; + struct tcphdr *th; + + + /* + * Determine where frame payload starts. + * Jump over vlan headers if already present + */ + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + else + ehdrlen = ETHER_HDR_LEN; + + /* Ensure we have at least the IP+TCP header in the first mbuf. 
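+ * (the IP and TCP headers are parsed in place below, so they must
+ * be contiguous in this first mbuf)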
*/ + if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr)) + return FALSE; + + ctxd = txr->next_avail_desc; + tx_buffer = &txr->tx_buffers[ctxd]; + TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; + + ip = (struct ip *)(mp->m_data + ehdrlen); + if (ip->ip_p != IPPROTO_TCP) + return FALSE; /* 0 */ + ip->ip_sum = 0; + ip_hlen = ip->ip_hl << 2; + th = (struct tcphdr *)((caddr_t)ip + ip_hlen); + th->th_sum = in_pseudo(ip->ip_src.s_addr, + ip->ip_dst.s_addr, htons(IPPROTO_TCP)); + tcp_hlen = th->th_off << 2; + hdrlen = ehdrlen + ip_hlen + tcp_hlen; + + /* This is used in the transmit desc in encap */ + *paylen = mp->m_pkthdr.len - hdrlen; + + /* VLAN MACLEN IPLEN */ + if (mp->m_flags & M_VLANTAG) { + vtag = htole16(mp->m_pkthdr.ether_vtag); + vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); + } + + vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= ip_hlen; + TXD->vlan_macip_lens |= htole32(vlan_macip_lens); + + /* ADV DTYPE TUCMD */ + type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); + + + /* MSS L4LEN IDX */ + mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); + TXD->mss_l4len_idx = htole32(mss_l4len_idx); + + TXD->seqnum_seed = htole32(0); + tx_buffer->m_head = NULL; + tx_buffer->eop_index = -1; + + if (++ctxd == adapter->num_tx_desc) + ctxd = 0; + + txr->tx_avail--; + txr->next_avail_desc = ctxd; + return TRUE; +} + +#ifdef IXGBE_FDIR +/* +** This routine parses packet headers so that Flow +** Director can make a hashed filter table entry +** allowing traffic flows to be identified and kept +** on the same cpu. This would be a performance +** hit, but we only do it at IXGBE_FDIR_RATE of +** packets. 
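+**
+** The signature filter is built from the IPv4 addresses, the
+** TCP/UDP ports and the VLAN id (or ethertype), and is installed
+** bound to this queue's MSIX vector, so later packets of the same
+** flow are steered back to the same queue/cpu.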
+*/ +static void +ixgbe_atr(struct tx_ring *txr, struct mbuf *mp) +{ + struct adapter *adapter = txr->adapter; + struct ix_queue *que; + struct ip *ip; + struct tcphdr *th; + struct udphdr *uh; + struct ether_vlan_header *eh; + union ixgbe_atr_hash_dword input = {.dword = 0}; + union ixgbe_atr_hash_dword common = {.dword = 0}; + int ehdrlen, ip_hlen; + u16 etype; + + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + etype = eh->evl_proto; + } else { + ehdrlen = ETHER_HDR_LEN; + etype = eh->evl_encap_proto; + } + + /* Only handling IPv4 */ + if (etype != htons(ETHERTYPE_IP)) + return; + + ip = (struct ip *)(mp->m_data + ehdrlen); + ip_hlen = ip->ip_hl << 2; + + /* check if we're UDP or TCP */ + switch (ip->ip_p) { + case IPPROTO_TCP: + th = (struct tcphdr *)((caddr_t)ip + ip_hlen); + /* src and dst are inverted */ + common.port.dst ^= th->th_sport; + common.port.src ^= th->th_dport; + input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + uh = (struct udphdr *)((caddr_t)ip + ip_hlen); + /* src and dst are inverted */ + common.port.dst ^= uh->uh_sport; + common.port.src ^= uh->uh_dport; + input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + default: + return; + } + + input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag); + if (mp->m_pkthdr.ether_vtag) + common.flex_bytes ^= htons(ETHERTYPE_VLAN); + else + common.flex_bytes ^= etype; + common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr; + + que = &adapter->queues[txr->me]; + /* + ** This assumes the Rx queue and Tx + ** queue are bound to the same CPU + */ + ixgbe_fdir_add_signature_filter_82599(&adapter->hw, + input, common, que->msix); +} +#endif /* IXGBE_FDIR */ + +/********************************************************************** + * + * Examine each tx_buffer in the used queue. If the hardware is done + * processing the packet then free associated resources. The + * tx_buffer is put back on the free queue. + * + **********************************************************************/ +static boolean_t +ixgbe_txeof(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct ifnet *ifp = adapter->ifp; + u32 first, last, done, processed; + struct ixgbe_tx_buf *tx_buffer; + struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc; + + mtx_assert(&txr->tx_mtx, MA_OWNED); + + if (txr->tx_avail == adapter->num_tx_desc) { + txr->queue_status = IXGBE_QUEUE_IDLE; + return FALSE; + } + + processed = 0; + first = txr->next_to_clean; + tx_buffer = &txr->tx_buffers[first]; + /* For cleanup we just use legacy struct */ + tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first]; + last = tx_buffer->eop_index; + if (last == -1) + return FALSE; + eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last]; + + /* + ** Get the index of the first descriptor + ** BEYOND the EOP and call that 'done'. + ** I do this so the comparison in the + ** inner while loop below can be simple + */ + if (++last == adapter->num_tx_desc) last = 0; + done = last; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_POSTREAD); + /* + ** Only the EOP descriptor of a packet now has the DD + ** bit set, this is what we look for... 
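+ **
+ ** Everything from 'first' up to (but not including) 'done'
+ ** belongs to that completed frame and can be unmapped, freed
+ ** and returned to the available count in one pass.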
+ */ + while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) { + /* We clean the range of the packet */ + while (first != done) { + tx_desc->upper.data = 0; + tx_desc->lower.data = 0; + tx_desc->buffer_addr = 0; + ++txr->tx_avail; + ++processed; + + if (tx_buffer->m_head) { + txr->bytes += + tx_buffer->m_head->m_pkthdr.len; + bus_dmamap_sync(txr->txtag, + tx_buffer->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + m_freem(tx_buffer->m_head); + tx_buffer->m_head = NULL; + tx_buffer->map = NULL; + } + tx_buffer->eop_index = -1; + txr->watchdog_time = ticks; + + if (++first == adapter->num_tx_desc) + first = 0; + + tx_buffer = &txr->tx_buffers[first]; + tx_desc = + (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first]; + } + ++txr->packets; + ++ifp->if_opackets; + /* See if there is more work now */ + last = tx_buffer->eop_index; + if (last != -1) { + eop_desc = + (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last]; + /* Get next done point */ + if (++last == adapter->num_tx_desc) last = 0; + done = last; + } else + break; + } + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + txr->next_to_clean = first; + + /* + ** Watchdog calculation, we know there's + ** work outstanding or the first return + ** would have been taken, so none processed + ** for too long indicates a hang. + */ + if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG)) + txr->queue_status = IXGBE_QUEUE_HUNG; + + /* + * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that + * it is OK to send packets. If there are no pending descriptors, + * clear the timeout. Otherwise, if some descriptors have been freed, + * restart the timeout. + */ + if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) { + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + if (txr->tx_avail == adapter->num_tx_desc) { + txr->queue_status = IXGBE_QUEUE_IDLE; + return FALSE; + } + } + + return TRUE; +} + +/********************************************************************* + * + * Refresh mbuf buffers for RX descriptor rings + * - now keeps its own state so discards due to resource + * exhaustion are unnecessary, if an mbuf cannot be obtained + * it just returns, keeping its placeholder, thus it can simply + * be recalled to try again. 
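+ * next_to_refresh tracks how far the ring has been replenished;
+ * the hardware tail (RDT) is only advanced at the end, and only
+ * if at least one descriptor was actually refreshed.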
+ * + **********************************************************************/ +static void +ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit) +{ + struct adapter *adapter = rxr->adapter; + bus_dma_segment_t hseg[1]; + bus_dma_segment_t pseg[1]; + struct ixgbe_rx_buf *rxbuf; + struct mbuf *mh, *mp; + int i, j, nsegs, error; + bool refreshed = FALSE; + + i = j = rxr->next_to_refresh; + /* Control the loop with one beyond */ + if (++j == adapter->num_rx_desc) + j = 0; + + while (j != limit) { + rxbuf = &rxr->rx_buffers[i]; + if (rxr->hdr_split == FALSE) + goto no_split; + + if (rxbuf->m_head == NULL) { + mh = m_gethdr(M_DONTWAIT, MT_DATA); + if (mh == NULL) + goto update; + } else + mh = rxbuf->m_head; + + mh->m_pkthdr.len = mh->m_len = MHLEN; + mh->m_len = MHLEN; + mh->m_flags |= M_PKTHDR; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->htag, + rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); + if (error != 0) { + printf("Refresh mbufs: hdr dmamap load" + " failure - %d\n", error); + m_free(mh); + rxbuf->m_head = NULL; + goto update; + } + rxbuf->m_head = mh; + bus_dmamap_sync(rxr->htag, rxbuf->hmap, + BUS_DMASYNC_PREREAD); + rxr->rx_base[i].read.hdr_addr = + htole64(hseg[0].ds_addr); + +no_split: + if (rxbuf->m_pack == NULL) { + mp = m_getjcl(M_DONTWAIT, MT_DATA, + M_PKTHDR, adapter->rx_mbuf_sz); + if (mp == NULL) + goto update; + } else + mp = rxbuf->m_pack; + + mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->ptag, + rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); + if (error != 0) { + printf("Refresh mbufs: payload dmamap load" + " failure - %d\n", error); + m_free(mp); + rxbuf->m_pack = NULL; + goto update; + } + rxbuf->m_pack = mp; + bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + BUS_DMASYNC_PREREAD); + rxr->rx_base[i].read.pkt_addr = + htole64(pseg[0].ds_addr); + + refreshed = TRUE; + /* Next is precalculated */ + i = j; + rxr->next_to_refresh = i; + if (++j == adapter->num_rx_desc) + j = 0; + } +update: + if (refreshed) /* Update hardware tail index */ + IXGBE_WRITE_REG(&adapter->hw, + IXGBE_RDT(rxr->me), rxr->next_to_refresh); + return; +} + +/********************************************************************* + * + * Allocate memory for rx_buffer structures. Since we use one + * rx_buffer per received packet, the maximum number of rx_buffer's + * that we'll need is equal to the number of receive descriptors + * that we've allocated. 
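+ *
+ * Two DMA tags are created: a small (MSIZE) one for the header
+ * mbufs used by header split, and a large (MJUM16BYTES) one for
+ * the payload clusters; every rx_buffer gets a map from each.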
+ * + **********************************************************************/ +static int +ixgbe_allocate_receive_buffers(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + device_t dev = adapter->dev; + struct ixgbe_rx_buf *rxbuf; + int i, bsize, error; + + bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc; + if (!(rxr->rx_buffers = + (struct ixgbe_rx_buf *) malloc(bsize, + M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate rx_buffer memory\n"); + error = ENOMEM; + goto fail; + } + + if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MSIZE, /* maxsize */ + 1, /* nsegments */ + MSIZE, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &rxr->htag))) { + device_printf(dev, "Unable to create RX DMA tag\n"); + goto fail; + } + + if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MJUM16BYTES, /* maxsize */ + 1, /* nsegments */ + MJUM16BYTES, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &rxr->ptag))) { + device_printf(dev, "Unable to create RX DMA tag\n"); + goto fail; + } + + for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) { + rxbuf = &rxr->rx_buffers[i]; + error = bus_dmamap_create(rxr->htag, + BUS_DMA_NOWAIT, &rxbuf->hmap); + if (error) { + device_printf(dev, "Unable to create RX head map\n"); + goto fail; + } + error = bus_dmamap_create(rxr->ptag, + BUS_DMA_NOWAIT, &rxbuf->pmap); + if (error) { + device_printf(dev, "Unable to create RX pkt map\n"); + goto fail; + } + } + + return (0); + +fail: + /* Frees all, but can handle partial completion */ + ixgbe_free_receive_structures(adapter); + return (error); +} + +/* +** Used to detect a descriptor that has +** been merged by Hardware RSC. +*/ +static inline u32 +ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx) +{ + return (le32toh(rx->wb.lower.lo_dword.data) & + IXGBE_RXDADV_RSCCNT_MASK) >> IXGBE_RXDADV_RSCCNT_SHIFT; +} + +/********************************************************************* + * + * Initialize Hardware RSC (LRO) feature on 82599 + * for an RX ring, this is toggled by the LRO capability + * even though it is transparent to the stack. 
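+ *
+ * The RSCCTL MAXDESC value is scaled to the receive cluster size
+ * so a coalesced frame cannot exceed 64K, and RSC is disabled for
+ * pure ACK packets through RSCDBU.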
+ * + **********************************************************************/ +static void +ixgbe_setup_hw_rsc(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + struct ixgbe_hw *hw = &adapter->hw; + u32 rscctrl, rdrxctl; + + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + rdrxctl |= IXGBE_RDRXCTL_RSCACKC; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); + + rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me)); + rscctrl |= IXGBE_RSCCTL_RSCEN; + + /* + ** Limit the total number of descriptors that + ** can be combined, so it does not exceed 64K + */ + if (adapter->rx_mbuf_sz == MCLBYTES) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; + else if (adapter->rx_mbuf_sz == MJUMPAGESIZE) + rscctrl |= IXGBE_RSCCTL_MAXDESC_8; + else if (adapter->rx_mbuf_sz == MJUM9BYTES) + rscctrl |= IXGBE_RSCCTL_MAXDESC_4; + else /* Using 16K cluster */ + rscctrl |= IXGBE_RSCCTL_MAXDESC_1; + + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl); + + /* Enable TCP header recognition */ + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), + (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) | + IXGBE_PSRTYPE_TCPHDR)); + + /* Disable RSC for ACK packets */ + IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, + (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); + + rxr->hw_rsc = TRUE; +} + + +static void +ixgbe_free_receive_ring(struct rx_ring *rxr) +{ + struct adapter *adapter; + struct ixgbe_rx_buf *rxbuf; + int i; + + adapter = rxr->adapter; + for (i = 0; i < adapter->num_rx_desc; i++) { + rxbuf = &rxr->rx_buffers[i]; + if (rxbuf->m_head != NULL) { + bus_dmamap_sync(rxr->htag, rxbuf->hmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->htag, rxbuf->hmap); + rxbuf->m_head->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_head); + } + if (rxbuf->m_pack != NULL) { + bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->ptag, rxbuf->pmap); + rxbuf->m_pack->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_pack); + } + rxbuf->m_head = NULL; + rxbuf->m_pack = NULL; + } +} + + +/********************************************************************* + * + * Initialize a receive ring and its buffers. + * + **********************************************************************/ +static int +ixgbe_setup_receive_ring(struct rx_ring *rxr) +{ + struct adapter *adapter; + struct ifnet *ifp; + device_t dev; + struct ixgbe_rx_buf *rxbuf; + bus_dma_segment_t pseg[1], hseg[1]; + struct lro_ctrl *lro = &rxr->lro; + int rsize, nsegs, error = 0; + + adapter = rxr->adapter; + ifp = adapter->ifp; + dev = adapter->dev; + + /* Clear the ring contents */ + IXGBE_RX_LOCK(rxr); + rsize = roundup2(adapter->num_rx_desc * + sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); + bzero((void *)rxr->rx_base, rsize); + + /* Free current RX buffer structs and their mbufs */ + ixgbe_free_receive_ring(rxr); + + /* Configure header split? 
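+ * (driven by the ixgbe_header_split setting; when enabled each
+ * descriptor gets a separate header mbuf besides the cluster)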
*/ + if (ixgbe_header_split) + rxr->hdr_split = TRUE; + + /* Now replenish the mbufs */ + for (int j = 0; j != adapter->num_rx_desc; ++j) { + struct mbuf *mh, *mp; + + rxbuf = &rxr->rx_buffers[j]; + /* + ** Don't allocate mbufs if not + ** doing header split, its wasteful + */ + if (rxr->hdr_split == FALSE) + goto skip_head; + + /* First the header */ + rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA); + if (rxbuf->m_head == NULL) { + error = ENOBUFS; + goto fail; + } + m_adj(rxbuf->m_head, ETHER_ALIGN); + mh = rxbuf->m_head; + mh->m_len = mh->m_pkthdr.len = MHLEN; + mh->m_flags |= M_PKTHDR; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->htag, + rxbuf->hmap, rxbuf->m_head, hseg, + &nsegs, BUS_DMA_NOWAIT); + if (error != 0) /* Nothing elegant to do here */ + goto fail; + bus_dmamap_sync(rxr->htag, + rxbuf->hmap, BUS_DMASYNC_PREREAD); + /* Update descriptor */ + rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr); + +skip_head: + /* Now the payload cluster */ + rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA, + M_PKTHDR, adapter->rx_mbuf_sz); + if (rxbuf->m_pack == NULL) { + error = ENOBUFS; + goto fail; + } + mp = rxbuf->m_pack; + mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->ptag, + rxbuf->pmap, mp, pseg, + &nsegs, BUS_DMA_NOWAIT); + if (error != 0) + goto fail; + bus_dmamap_sync(rxr->ptag, + rxbuf->pmap, BUS_DMASYNC_PREREAD); + /* Update descriptor */ + rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr); + } + + + /* Setup our descriptor indices */ + rxr->next_to_check = 0; + rxr->next_to_refresh = 0; + rxr->lro_enabled = FALSE; + rxr->rx_split_packets = 0; + rxr->rx_bytes = 0; + rxr->discard = FALSE; + + bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* + ** Now set up the LRO interface: + ** 82598 uses software LRO, the + ** 82599 and X540 use a hardware assist. + */ + if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && + (ifp->if_capenable & IFCAP_RXCSUM) && + (ifp->if_capenable & IFCAP_LRO)) + ixgbe_setup_hw_rsc(rxr); + else if (ifp->if_capenable & IFCAP_LRO) { + int err = tcp_lro_init(lro); + if (err) { + device_printf(dev, "LRO Initialization failed!\n"); + goto fail; + } + INIT_DEBUGOUT("RX Soft LRO Initialized\n"); + rxr->lro_enabled = TRUE; + lro->ifp = adapter->ifp; + } + + IXGBE_RX_UNLOCK(rxr); + return (0); + +fail: + ixgbe_free_receive_ring(rxr); + IXGBE_RX_UNLOCK(rxr); + return (error); +} + +/********************************************************************* + * + * Initialize all receive rings. + * + **********************************************************************/ +static int +ixgbe_setup_receive_structures(struct adapter *adapter) +{ + struct rx_ring *rxr = adapter->rx_rings; + int j; + + for (j = 0; j < adapter->num_queues; j++, rxr++) + if (ixgbe_setup_receive_ring(rxr)) + goto fail; + + return (0); +fail: + /* + * Free RX buffers allocated so far, we will only handle + * the rings that completed, the failing case will have + * cleaned up for itself. 'j' failed, so its the terminus. + */ + for (int i = 0; i < j; ++i) { + rxr = &adapter->rx_rings[i]; + ixgbe_free_receive_ring(rxr); + } + + return (ENOBUFS); +} + +/********************************************************************* + * + * Setup receive registers and features. 
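+ *
+ * Programs the per-ring base/length and SRRCTL buffer sizes,
+ * enables jumbo frames when the MTU requires it, and, with more
+ * than one queue, fills the 128-entry RSS redirection table (four
+ * entries packed per RETA register), the RSSRK hash key and the
+ * MRQC field-selection bits.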
+ * + **********************************************************************/ +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void +ixgbe_initialize_receive_units(struct adapter *adapter) +{ + struct rx_ring *rxr = adapter->rx_rings; + struct ixgbe_hw *hw = &adapter->hw; + struct ifnet *ifp = adapter->ifp; + u32 bufsz, rxctrl, fctrl, srrctl, rxcsum; + u32 reta, mrqc = 0, hlreg, random[10]; + + + /* + * Make sure receives are disabled while + * setting up the descriptor ring + */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, + rxctrl & ~IXGBE_RXCTRL_RXEN); + + /* Enable broadcasts */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; + fctrl |= IXGBE_FCTRL_PMCF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + /* Set for Jumbo Frames? */ + hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); + if (ifp->if_mtu > ETHERMTU) + hlreg |= IXGBE_HLREG0_JUMBOEN; + else + hlreg &= ~IXGBE_HLREG0_JUMBOEN; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); + + bufsz = adapter->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + + for (int i = 0; i < adapter->num_queues; i++, rxr++) { + u64 rdba = rxr->rxdma.dma_paddr; + + /* Setup the Base and Length of the Rx Descriptor Ring */ + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), + (rdba & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), + adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); + + /* Set up the SRRCTL register */ + srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); + srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; + srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; + srrctl |= bufsz; + if (rxr->hdr_split) { + /* Use a standard mbuf for the header */ + srrctl |= ((IXGBE_RX_HDR << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) + & IXGBE_SRRCTL_BSIZEHDR_MASK); + srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl); + + /* Setup the HW Rx Head and Tail Descriptor Pointers */ + IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); + } + + if (adapter->hw.mac.type != ixgbe_mac_82598EB) { + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_IPV6HDR; + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype); + } + + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + + /* Setup RSS */ + if (adapter->num_queues > 1) { + int i, j; + reta = 0; + + /* set up random bits */ + arc4rand(&random, sizeof(random), 0); + + /* Set up the redirection table */ + for (i = 0, j = 0; i < 128; i++, j++) { + if (j == adapter->num_queues) j = 0; + reta = (reta << 8) | (j * 0x11); + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); + } + + /* Now fill our hash function seeds */ + for (int i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]); + + /* Perform hash on these packet types */ + mrqc = IXGBE_MRQC_RSSEN + | IXGBE_MRQC_RSS_FIELD_IPV4 + | IXGBE_MRQC_RSS_FIELD_IPV4_TCP + | IXGBE_MRQC_RSS_FIELD_IPV4_UDP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX + | IXGBE_MRQC_RSS_FIELD_IPV6 + | IXGBE_MRQC_RSS_FIELD_IPV6_TCP + | IXGBE_MRQC_RSS_FIELD_IPV6_UDP + | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* RSS and RX IPP Checksum are mutually exclusive */ + rxcsum |= IXGBE_RXCSUM_PCSD; + } + + if (ifp->if_capenable & IFCAP_RXCSUM) + rxcsum |= IXGBE_RXCSUM_PCSD; + + if (!(rxcsum & IXGBE_RXCSUM_PCSD)) + rxcsum |= IXGBE_RXCSUM_IPPCSE; + + 
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + return; +} + +/********************************************************************* + * + * Free all receive rings. + * + **********************************************************************/ +static void +ixgbe_free_receive_structures(struct adapter *adapter) +{ + struct rx_ring *rxr = adapter->rx_rings; + + for (int i = 0; i < adapter->num_queues; i++, rxr++) { + struct lro_ctrl *lro = &rxr->lro; + ixgbe_free_receive_buffers(rxr); + /* Free LRO memory */ + tcp_lro_free(lro); + /* Free the ring memory as well */ + ixgbe_dma_free(adapter, &rxr->rxdma); + } + + free(adapter->rx_rings, M_DEVBUF); +} + + +/********************************************************************* + * + * Free receive ring data structures + * + **********************************************************************/ +static void +ixgbe_free_receive_buffers(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + struct ixgbe_rx_buf *rxbuf; + + INIT_DEBUGOUT("free_receive_structures: begin"); + + /* Cleanup any existing buffers */ + if (rxr->rx_buffers != NULL) { + for (int i = 0; i < adapter->num_rx_desc; i++) { + rxbuf = &rxr->rx_buffers[i]; + if (rxbuf->m_head != NULL) { + bus_dmamap_sync(rxr->htag, rxbuf->hmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->htag, rxbuf->hmap); + rxbuf->m_head->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_head); + } + if (rxbuf->m_pack != NULL) { + bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->ptag, rxbuf->pmap); + rxbuf->m_pack->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_pack); + } + rxbuf->m_head = NULL; + rxbuf->m_pack = NULL; + if (rxbuf->hmap != NULL) { + bus_dmamap_destroy(rxr->htag, rxbuf->hmap); + rxbuf->hmap = NULL; + } + if (rxbuf->pmap != NULL) { + bus_dmamap_destroy(rxr->ptag, rxbuf->pmap); + rxbuf->pmap = NULL; + } + } + if (rxr->rx_buffers != NULL) { + free(rxr->rx_buffers, M_DEVBUF); + rxr->rx_buffers = NULL; + } + } + + if (rxr->htag != NULL) { + bus_dma_tag_destroy(rxr->htag); + rxr->htag = NULL; + } + if (rxr->ptag != NULL) { + bus_dma_tag_destroy(rxr->ptag); + rxr->ptag = NULL; + } + + return; +} + +static __inline void +ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype) +{ + + /* + * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet + * should be computed by hardware. Also it should not have VLAN tag in + * ethernet header. + */ + if (rxr->lro_enabled && + (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && + (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && + (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == + (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) && + (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == + (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { + /* + * Send to the stack if: + ** - LRO not enabled, or + ** - no LRO resources, or + ** - lro enqueue fails + */ + if (rxr->lro.lro_cnt != 0) + if (tcp_lro_rx(&rxr->lro, m, 0) == 0) + return; + } + IXGBE_RX_UNLOCK(rxr); + (*ifp->if_input)(ifp, m); + IXGBE_RX_LOCK(rxr); +} + +static __inline void +ixgbe_rx_discard(struct rx_ring *rxr, int i) +{ + struct ixgbe_rx_buf *rbuf; + + rbuf = &rxr->rx_buffers[i]; + + if (rbuf->fmp != NULL) {/* Partial chain ? 
*/ + rbuf->fmp->m_flags |= M_PKTHDR; + m_freem(rbuf->fmp); + rbuf->fmp = NULL; + } + + /* + ** With advanced descriptors the writeback + ** clobbers the buffer addrs, so its easier + ** to just free the existing mbufs and take + ** the normal refresh path to get new buffers + ** and mapping. + */ + if (rbuf->m_head) { + m_free(rbuf->m_head); + rbuf->m_head = NULL; + } + + if (rbuf->m_pack) { + m_free(rbuf->m_pack); + rbuf->m_pack = NULL; + } + + return; +} + + +/********************************************************************* + * + * This routine executes in interrupt context. It replenishes + * the mbufs in the descriptor and sends data which has been + * dma'ed into host memory to upper layer. + * + * We loop at most count times if count is > 0, or until done if + * count < 0. + * + * Return TRUE for more work, FALSE for all clean. + *********************************************************************/ +static bool +ixgbe_rxeof(struct ix_queue *que, int count) +{ + struct adapter *adapter = que->adapter; + struct rx_ring *rxr = que->rxr; + struct ifnet *ifp = adapter->ifp; + struct lro_ctrl *lro = &rxr->lro; + struct lro_entry *queued; + int i, nextp, processed = 0; + u32 staterr = 0; + union ixgbe_adv_rx_desc *cur; + struct ixgbe_rx_buf *rbuf, *nbuf; + + IXGBE_RX_LOCK(rxr); + + for (i = rxr->next_to_check; count != 0;) { + struct mbuf *sendmp, *mh, *mp; + u32 rsc, ptype; + u16 hlen, plen, hdr, vtag; + bool eop; + + /* Sync the ring. */ + bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + cur = &rxr->rx_base[i]; + staterr = le32toh(cur->wb.upper.status_error); + + if ((staterr & IXGBE_RXD_STAT_DD) == 0) + break; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + break; + + count--; + sendmp = NULL; + nbuf = NULL; + rsc = 0; + cur->wb.upper.status_error = 0; + rbuf = &rxr->rx_buffers[i]; + mh = rbuf->m_head; + mp = rbuf->m_pack; + + plen = le16toh(cur->wb.upper.length); + ptype = le32toh(cur->wb.lower.lo_dword.data) & + IXGBE_RXDADV_PKTTYPE_MASK; + hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info); + vtag = le16toh(cur->wb.upper.vlan); + eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); + + /* Make sure bad packets are discarded */ + if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) || + (rxr->discard)) { + ifp->if_ierrors++; + rxr->rx_discarded++; + if (eop) + rxr->discard = FALSE; + else + rxr->discard = TRUE; + ixgbe_rx_discard(rxr, i); + goto next_desc; + } + + /* + ** On 82599 which supports a hardware + ** LRO (called HW RSC), packets need + ** not be fragmented across sequential + ** descriptors, rather the next descriptor + ** is indicated in bits of the descriptor. + ** This also means that we might proceses + ** more than one packet at a time, something + ** that has never been true before, it + ** required eliminating global chain pointers + ** in favor of what we are doing here. -jfv + */ + if (!eop) { + /* + ** Figure out the next descriptor + ** of this frame. + */ + if (rxr->hw_rsc == TRUE) { + rsc = ixgbe_rsc_count(cur); + rxr->rsc_num += (rsc - 1); + } + if (rsc) { /* Get hardware index */ + nextp = ((staterr & + IXGBE_RXDADV_NEXTP_MASK) >> + IXGBE_RXDADV_NEXTP_SHIFT); + } else { /* Just sequential */ + nextp = i + 1; + if (nextp == adapter->num_rx_desc) + nextp = 0; + } + nbuf = &rxr->rx_buffers[nextp]; + prefetch(nbuf); + } + /* + ** The header mbuf is ONLY used when header + ** split is enabled, otherwise we get normal + ** behavior, ie, both header and payload + ** are DMA'd into the payload buffer. 
+ ** + ** Rather than using the fmp/lmp global pointers + ** we now keep the head of a packet chain in the + ** buffer struct and pass this along from one + ** descriptor to the next, until we get EOP. + */ + if (rxr->hdr_split && (rbuf->fmp == NULL)) { + /* This must be an initial descriptor */ + hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >> + IXGBE_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > IXGBE_RX_HDR) + hlen = IXGBE_RX_HDR; + mh->m_len = hlen; + mh->m_flags |= M_PKTHDR; + mh->m_next = NULL; + mh->m_pkthdr.len = mh->m_len; + /* Null buf pointer so it is refreshed */ + rbuf->m_head = NULL; + /* + ** Check the payload length, this + ** could be zero if its a small + ** packet. + */ + if (plen > 0) { + mp->m_len = plen; + mp->m_next = NULL; + mp->m_flags &= ~M_PKTHDR; + mh->m_next = mp; + mh->m_pkthdr.len += mp->m_len; + /* Null buf pointer so it is refreshed */ + rbuf->m_pack = NULL; + rxr->rx_split_packets++; + } + /* + ** Now create the forward + ** chain so when complete + ** we wont have to. + */ + if (eop == 0) { + /* stash the chain head */ + nbuf->fmp = mh; + /* Make forward chain */ + if (plen) + mp->m_next = nbuf->m_pack; + else + mh->m_next = nbuf->m_pack; + } else { + /* Singlet, prepare to send */ + sendmp = mh; + if ((adapter->num_vlans) && + (staterr & IXGBE_RXD_STAT_VP)) { + sendmp->m_pkthdr.ether_vtag = vtag; + sendmp->m_flags |= M_VLANTAG; + } + } + } else { + /* + ** Either no header split, or a + ** secondary piece of a fragmented + ** split packet. + */ + mp->m_len = plen; + /* + ** See if there is a stored head + ** that determines what we are + */ + sendmp = rbuf->fmp; + rbuf->m_pack = rbuf->fmp = NULL; + + if (sendmp != NULL) /* secondary frag */ + sendmp->m_pkthdr.len += mp->m_len; + else { + /* first desc of a non-ps chain */ + sendmp = mp; + sendmp->m_flags |= M_PKTHDR; + sendmp->m_pkthdr.len = mp->m_len; + if (staterr & IXGBE_RXD_STAT_VP) { + sendmp->m_pkthdr.ether_vtag = vtag; + sendmp->m_flags |= M_VLANTAG; + } + } + /* Pass the head pointer on */ + if (eop == 0) { + nbuf->fmp = sendmp; + sendmp = NULL; + mp->m_next = nbuf->m_pack; + } + } + ++processed; + /* Sending this frame? */ + if (eop) { + sendmp->m_pkthdr.rcvif = ifp; + ifp->if_ipackets++; + rxr->rx_packets++; + /* capture data for AIM */ + rxr->bytes += sendmp->m_pkthdr.len; + rxr->rx_bytes += sendmp->m_pkthdr.len; + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) + ixgbe_rx_checksum(staterr, sendmp, ptype); +#if __FreeBSD_version >= 800000 + sendmp->m_pkthdr.flowid = que->msix; + sendmp->m_flags |= M_FLOWID; +#endif + } +next_desc: + bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* Advance our pointers to the next descriptor. */ + if (++i == adapter->num_rx_desc) + i = 0; + + /* Now send to the stack or do LRO */ + if (sendmp != NULL) { + rxr->next_to_check = i; + ixgbe_rx_input(rxr, ifp, sendmp, ptype); + i = rxr->next_to_check; + } + + /* Every 8 descriptors we go to refresh mbufs */ + if (processed == 8) { + ixgbe_refresh_mbufs(rxr, i); + processed = 0; + } + } + + /* Refresh any remaining buf structs */ + if (ixgbe_rx_unrefreshed(rxr)) + ixgbe_refresh_mbufs(rxr, i); + + rxr->next_to_check = i; + + /* + * Flush any outstanding LRO work + */ + while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { + SLIST_REMOVE_HEAD(&lro->lro_active, next); + tcp_lro_flush(lro, queued); + } + + IXGBE_RX_UNLOCK(rxr); + + /* + ** We still have cleaning to do? + ** Schedule another interrupt if so. 
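+ **
+ ** (the last descriptor examined still had DD set, so re-arm the
+ ** queue's MSIX vector and let another interrupt finish the work)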
+ */ + if ((staterr & IXGBE_RXD_STAT_DD) != 0) { + ixgbe_rearm_queues(adapter, (u64)(1 << que->msix)); + return (TRUE); + } + + return (FALSE); +} + + +/********************************************************************* + * + * Verify that the hardware indicated that the checksum is valid. + * Inform the stack about the status of checksum so that stack + * doesn't spend time verifying the checksum. + * + *********************************************************************/ +static void +ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype) +{ + u16 status = (u16) staterr; + u8 errors = (u8) (staterr >> 24); + bool sctp = FALSE; + + if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && + (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0) + sctp = TRUE; + + if (status & IXGBE_RXD_STAT_IPCS) { + if (!(errors & IXGBE_RXD_ERR_IPE)) { + /* IP Checksum Good */ + mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; + mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; + + } else + mp->m_pkthdr.csum_flags = 0; + } + if (status & IXGBE_RXD_STAT_L4CS) { + u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); +#if __FreeBSD_version >= 800000 + if (sctp) + type = CSUM_SCTP_VALID; +#endif + if (!(errors & IXGBE_RXD_ERR_TCPE)) { + mp->m_pkthdr.csum_flags |= type; + if (!sctp) + mp->m_pkthdr.csum_data = htons(0xffff); + } + } + return; +} + + +/* +** This routine is run via an vlan config EVENT, +** it enables us to use the HW Filter table since +** we can get the vlan id. This just creates the +** entry in the soft version of the VFTA, init will +** repopulate the real table. +*/ +static void +ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) +{ + struct adapter *adapter = ifp->if_softc; + u16 index, bit; + + if (ifp->if_softc != arg) /* Not our event */ + return; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + IXGBE_CORE_LOCK(adapter); + index = (vtag >> 5) & 0x7F; + bit = vtag & 0x1F; + adapter->shadow_vfta[index] |= (1 << bit); + ++adapter->num_vlans; + ixgbe_init_locked(adapter); + IXGBE_CORE_UNLOCK(adapter); +} + +/* +** This routine is run via an vlan +** unconfig EVENT, remove our entry +** in the soft vfta. +*/ +static void +ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) +{ + struct adapter *adapter = ifp->if_softc; + u16 index, bit; + + if (ifp->if_softc != arg) + return; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + IXGBE_CORE_LOCK(adapter); + index = (vtag >> 5) & 0x7F; + bit = vtag & 0x1F; + adapter->shadow_vfta[index] &= ~(1 << bit); + --adapter->num_vlans; + /* Re-init to load the changes */ + ixgbe_init_locked(adapter); + IXGBE_CORE_UNLOCK(adapter); +} + +static void +ixgbe_setup_vlan_hw_support(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + struct ixgbe_hw *hw = &adapter->hw; + u32 ctrl; + + + /* + ** We get here thru init_locked, meaning + ** a soft reset, this has already cleared + ** the VFTA and other state, so if there + ** have been no vlan's registered do nothing. + */ + if (adapter->num_vlans == 0) + return; + + /* + ** A soft reset zero's out the VFTA, so + ** we need to repopulate it now. 
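+ **
+ ** Each shadow_vfta word covers 32 VLAN ids: for a given vtag the
+ ** word index is (vtag >> 5) & 0x7F and the bit is vtag & 0x1F,
+ ** exactly as maintained by the register/unregister handlers above.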
+ */ + for (int i = 0; i < IXGBE_VFTA_SIZE; i++) + if (adapter->shadow_vfta[i] != 0) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), + adapter->shadow_vfta[i]); + + ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + /* Enable the Filter Table if enabled */ + if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { + ctrl &= ~IXGBE_VLNCTRL_CFIEN; + ctrl |= IXGBE_VLNCTRL_VFE; + } + if (hw->mac.type == ixgbe_mac_82598EB) + ctrl |= IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); + + /* On 82599 the VLAN enable is per/queue in RXDCTL */ + if (hw->mac.type != ixgbe_mac_82598EB) + for (int i = 0; i < adapter->num_queues; i++) { + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl); + } +} + +static void +ixgbe_enable_intr(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ix_queue *que = adapter->queues; + u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); + + + /* Enable Fan Failure detection */ + if (hw->device_id == IXGBE_DEV_ID_82598AT) + mask |= IXGBE_EIMS_GPI_SDP1; + else { + mask |= IXGBE_EIMS_ECC; + mask |= IXGBE_EIMS_GPI_SDP0; + mask |= IXGBE_EIMS_GPI_SDP1; + mask |= IXGBE_EIMS_GPI_SDP2; +#ifdef IXGBE_FDIR + mask |= IXGBE_EIMS_FLOW_DIR; +#endif + } + + IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask); + + /* With RSS we use auto clear */ + if (adapter->msix_mem) { + mask = IXGBE_EIMS_ENABLE_MASK; + /* Don't autoclear Link */ + mask &= ~IXGBE_EIMS_OTHER; + mask &= ~IXGBE_EIMS_LSC; + IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask); + } + + /* + ** Now enable all queues, this is done separately to + ** allow for handling the extended (beyond 32) MSIX + ** vectors that can be used by 82599 + */ + for (int i = 0; i < adapter->num_queues; i++, que++) + ixgbe_enable_queue(adapter, que->msix); + + IXGBE_WRITE_FLUSH(hw); + + return; +} + +static void +ixgbe_disable_intr(struct adapter *adapter) +{ + if (adapter->msix_mem) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0); + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); + } else { + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); + } + IXGBE_WRITE_FLUSH(&adapter->hw); + return; +} + +u16 +ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg) +{ + u16 value; + + value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev, + reg, 2); + + return (value); +} + +void +ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value) +{ + pci_write_config(((struct ixgbe_osdep *)hw->back)->dev, + reg, value, 2); + + return; +} + +/* +** Setup the correct IVAR register for a particular MSIX interrupt +** (yes this is all very magic and confusing :) +** - entry is the register array entry +** - vector is the MSIX vector for this queue +** - type is RX/TX/MISC +*/ +static void +ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ivar, index; + + vector |= IXGBE_IVAR_ALLOC_VAL; + + switch (hw->mac.type) { + + case ixgbe_mac_82598EB: + if (type == -1) + entry = IXGBE_IVAR_OTHER_CAUSES_INDEX; + else + entry += (type * 64); + index = (entry >> 2) & 0x1F; + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index)); + ivar &= ~(0xFF << (8 * (entry & 0x3))); + ivar |= (vector << (8 * (entry & 0x3))); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar); + break; + + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + if (type == -1) { /* MISC IVAR */ + index = (entry & 1) * 8; + ivar = 
IXGBE_READ_REG(hw, IXGBE_IVAR_MISC); + ivar &= ~(0xFF << index); + ivar |= (vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar); + } else { /* RX/TX IVARS */ + index = (16 * (entry & 1)) + (8 * type); + ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1)); + ivar &= ~(0xFF << index); + ivar |= (vector << index); + IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar); + } + + default: + break; + } +} + +static void +ixgbe_configure_ivars(struct adapter *adapter) +{ + struct ix_queue *que = adapter->queues; + u32 newitr; + + if (ixgbe_max_interrupt_rate > 0) + newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8; + else + newitr = 0; + + for (int i = 0; i < adapter->num_queues; i++, que++) { + /* First the RX queue entry */ + ixgbe_set_ivar(adapter, i, que->msix, 0); + /* ... and the TX */ + ixgbe_set_ivar(adapter, i, que->msix, 1); + /* Set an Initial EITR value */ + IXGBE_WRITE_REG(&adapter->hw, + IXGBE_EITR(que->msix), newitr); + } + + /* For the Link interrupt */ + ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1); +} + +/* +** ixgbe_sfp_probe - called in the local timer to +** determine if a port had optics inserted. +*/ +static bool ixgbe_sfp_probe(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + device_t dev = adapter->dev; + bool result = FALSE; + + if ((hw->phy.type == ixgbe_phy_nl) && + (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) { + s32 ret = hw->phy.ops.identify_sfp(hw); + if (ret) + goto out; + ret = hw->phy.ops.reset(hw); + if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) { + device_printf(dev,"Unsupported SFP+ module detected!"); + printf(" Reload driver with supported module.\n"); + adapter->sfp_probe = FALSE; + goto out; + } else + device_printf(dev,"SFP+ module detected!\n"); + /* We now have supported optics */ + adapter->sfp_probe = FALSE; + /* Set the optics type so system reports correctly */ + ixgbe_setup_optics(adapter); + result = TRUE; + } +out: + return (result); +} + +/* +** Tasklet handler for MSIX Link interrupts +** - do outside interrupt since it might sleep +*/ +static void +ixgbe_handle_link(void *context, int pending) +{ + struct adapter *adapter = context; + + ixgbe_check_link(&adapter->hw, + &adapter->link_speed, &adapter->link_up, 0); + ixgbe_update_link_status(adapter); +} + +/* +** Tasklet for handling SFP module interrupts +*/ +static void +ixgbe_handle_mod(void *context, int pending) +{ + struct adapter *adapter = context; + struct ixgbe_hw *hw = &adapter->hw; + device_t dev = adapter->dev; + u32 err; + + err = hw->phy.ops.identify_sfp(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { + device_printf(dev, + "Unsupported SFP+ module type was detected.\n"); + return; + } + err = hw->mac.ops.setup_sfp(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { + device_printf(dev, + "Setup failure - unsupported SFP+ module type.\n"); + return; + } + taskqueue_enqueue(adapter->tq, &adapter->msf_task); + return; +} + + +/* +** Tasklet for handling MSF (multispeed fiber) interrupts +*/ +static void +ixgbe_handle_msf(void *context, int pending) +{ + struct adapter *adapter = context; + struct ixgbe_hw *hw = &adapter->hw; + u32 autoneg; + bool negotiate; + + autoneg = hw->phy.autoneg_advertised; + if ((!autoneg) && (hw->mac.ops.get_link_capabilities)) + hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate); + if (hw->mac.ops.setup_link) + hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE); + return; +} + +#ifdef IXGBE_FDIR +/* +** Tasklet for reinitializing the Flow Director filter table +*/ +static void +ixgbe_reinit_fdir(void *context, 
int pending) +{ + struct adapter *adapter = context; + struct ifnet *ifp = adapter->ifp; + + if (adapter->fdir_reinit != 1) /* Shouldn't happen */ + return; + ixgbe_reinit_fdir_tables_82599(&adapter->hw); + adapter->fdir_reinit = 0; + /* Restart the interface */ + ifp->if_drv_flags |= IFF_DRV_RUNNING; + return; +} +#endif + +/********************************************************************** + * + * Update the board statistics counters. + * + **********************************************************************/ +static void +ixgbe_update_stats_counters(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + struct ixgbe_hw *hw = &adapter->hw; + u32 missed_rx = 0, bprc, lxon, lxoff, total; + u64 total_missed_rx = 0; + + adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); + adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); + adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); + + for (int i = 0; i < 8; i++) { + u32 mp; + mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + /* missed_rx tallies misses for the gprc workaround */ + missed_rx += mp; + /* global total per queue */ + adapter->stats.mpc[i] += mp; + /* Running comprehensive total for stats display */ + total_missed_rx += adapter->stats.mpc[i]; + if (hw->mac.type == ixgbe_mac_82598EB) + adapter->stats.rnbc[i] += + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + adapter->stats.pxontxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + adapter->stats.pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + adapter->stats.pxofftxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + adapter->stats.pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + adapter->stats.pxon2offc[i] += + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + } + for (int i = 0; i < 16; i++) { + adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + adapter->stats.qbrc[i] += + ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32); + adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + adapter->stats.qbtc[i] += + ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32); + adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + } + adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); + adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC); + adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + + /* Hardware workaround, gprc counts missed packets */ + adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + adapter->stats.gprc -= missed_rx; + + if (hw->mac.type != ixgbe_mac_82598EB) { + adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + + ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); + adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + + ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); + adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) + + ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); + adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + /* 82598 only has a counter in the high register */ + adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH); + } + + /* + * Workaround: mprc hardware is incorrectly counting + * 
broadcasts, so for now we subtract those. + */ + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + adapter->stats.bprc += bprc; + adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + if (hw->mac.type == ixgbe_mac_82598EB) + adapter->stats.mprc -= bprc; + + adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); + adapter->stats.lxontxc += lxon; + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + adapter->stats.lxofftxc += lxoff; + total = lxon + lxoff; + + adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + adapter->stats.gptc -= total; + adapter->stats.mptc -= total; + adapter->stats.ptc64 -= total; + adapter->stats.gotc -= total * ETHER_MIN_LEN; + + adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC); + adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); + adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); + adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); + adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT); + adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC); + adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); + /* Only read FCOE on 82599 */ + if (hw->mac.type != ixgbe_mac_82598EB) { + adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); + } + + /* Fill out the OS statistics structure */ + ifp->if_ipackets = adapter->stats.gprc; + ifp->if_opackets = adapter->stats.gptc; + ifp->if_ibytes = adapter->stats.gorc; + ifp->if_obytes = adapter->stats.gotc; + ifp->if_imcasts = adapter->stats.mprc; + ifp->if_collisions = 0; + + /* Rx Errors */ + ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs + + adapter->stats.rlec; +} + +/** ixgbe_sysctl_tdh_handler - Handler function + * Retrieves the TDH value from the hardware + */ +static int +ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS) +{ + int error; + + struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); + if (!txr) return 0; + + unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me)); + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return error; + return 0; +} + +/** ixgbe_sysctl_tdt_handler - Handler function + * Retrieves the TDT value from the hardware + */ +static int 
+ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS) +{ + int error; + + struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1); + if (!txr) return 0; + + unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me)); + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return error; + return 0; +} + +/** ixgbe_sysctl_rdh_handler - Handler function + * Retrieves the RDH value from the hardware + */ +static int +ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS) +{ + int error; + + struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); + if (!rxr) return 0; + + unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me)); + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return error; + return 0; +} + +/** ixgbe_sysctl_rdt_handler - Handler function + * Retrieves the RDT value from the hardware + */ +static int +ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS) +{ + int error; + + struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1); + if (!rxr) return 0; + + unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me)); + error = sysctl_handle_int(oidp, &val, 0, req); + if (error || !req->newptr) + return error; + return 0; +} + +static int +ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) +{ + int error; + struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1); + unsigned int reg, usec, rate; + + reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix)); + usec = ((reg & 0x0FF8) >> 3); + if (usec > 0) + rate = 1000000 / usec; + else + rate = 0; + error = sysctl_handle_int(oidp, &rate, 0, req); + if (error || !req->newptr) + return error; + return 0; +} + +/* + * Add sysctl variables, one per statistic, to the system. + */ +static void +ixgbe_add_hw_stats(struct adapter *adapter) +{ + + device_t dev = adapter->dev; + + struct tx_ring *txr = adapter->tx_rings; + struct rx_ring *rxr = adapter->rx_rings; + + struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); + struct sysctl_oid *tree = device_get_sysctl_tree(dev); + struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); + struct ixgbe_hw_stats *stats = &adapter->stats; + + struct sysctl_oid *stat_node, *queue_node; + struct sysctl_oid_list *stat_list, *queue_list; + +#define QUEUE_NAME_LEN 32 + char namebuf[QUEUE_NAME_LEN]; + + /* Driver Statistics */ + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", + CTLFLAG_RD, &adapter->dropped_pkts, + "Driver dropped packets"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed", + CTLFLAG_RD, &adapter->mbuf_defrag_failed, + "m_defrag() failed"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup", + CTLFLAG_RD, &adapter->no_tx_dma_setup, + "Driver tx dma failure in xmit"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events", + CTLFLAG_RD, &adapter->watchdog_events, + "Watchdog timeouts"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx", + CTLFLAG_RD, &adapter->tso_tx, + "TSO"); + SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq", + CTLFLAG_RD, &adapter->link_irq, + "Link MSIX IRQ Handled"); + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); + queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, + CTLFLAG_RD, NULL, "Queue Name"); + queue_list = SYSCTL_CHILDREN(queue_node); + + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", + CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i], + sizeof(&adapter->queues[i]), + ixgbe_sysctl_interrupt_rate_handler, "IU", + "Interrupt Rate"); + SYSCTL_ADD_PROC(ctx, queue_list, 
OID_AUTO, "txd_head", + CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), + ixgbe_sysctl_tdh_handler, "IU", + "Transmit Descriptor Head"); + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", + CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr), + ixgbe_sysctl_tdt_handler, "IU", + "Transmit Descriptor Tail"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", + CTLFLAG_RD, &txr->no_desc_avail, + "Queue No Descriptor Available"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets", + CTLFLAG_RD, &txr->total_packets, + "Queue Packets Transmitted"); + } + + for (int i = 0; i < adapter->num_queues; i++, rxr++) { + snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); + queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, + CTLFLAG_RD, NULL, "Queue Name"); + queue_list = SYSCTL_CHILDREN(queue_node); + + struct lro_ctrl *lro = &rxr->lro; + + snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i); + queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, + CTLFLAG_RD, NULL, "Queue Name"); + queue_list = SYSCTL_CHILDREN(queue_node); + + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", + CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), + ixgbe_sysctl_rdh_handler, "IU", + "Receive Descriptor Head"); + SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", + CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr), + ixgbe_sysctl_rdt_handler, "IU", + "Receive Descriptor Tail"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets", + CTLFLAG_RD, &rxr->rx_packets, + "Queue Packets Received"); + SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes", + CTLFLAG_RD, &rxr->rx_bytes, + "Queue Bytes Received"); + SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued", + CTLFLAG_RD, &lro->lro_queued, 0, + "LRO Queued"); + SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed", + CTLFLAG_RD, &lro->lro_flushed, 0, + "LRO Flushed"); + } + + /* MAC stats get the own sub node */ + + stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", + CTLFLAG_RD, NULL, "MAC Statistics"); + stat_list = SYSCTL_CHILDREN(stat_node); + + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs", + CTLFLAG_RD, &stats->crcerrs, + "CRC Errors"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs", + CTLFLAG_RD, &stats->illerrc, + "Illegal Byte Errors"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs", + CTLFLAG_RD, &stats->errbc, + "Byte Errors"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards", + CTLFLAG_RD, &stats->mspdc, + "MAC Short Packets Discarded"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults", + CTLFLAG_RD, &stats->mlfc, + "MAC Local Faults"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults", + CTLFLAG_RD, &stats->mrfc, + "MAC Remote Faults"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs", + CTLFLAG_RD, &stats->rlec, + "Receive Length Errors"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_txd", + CTLFLAG_RD, &stats->lxontxc, + "Link XON Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd", + CTLFLAG_RD, &stats->lxonrxc, + "Link XON Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd", + CTLFLAG_RD, &stats->lxofftxc, + "Link XOFF Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd", + CTLFLAG_RD, &stats->lxoffrxc, + "Link XOFF Received"); + + /* Packet Reception Stats */ + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd", + CTLFLAG_RD, &stats->tor, + "Total Octets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd", + CTLFLAG_RD, 
&stats->gorc, + "Good Octets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd", + CTLFLAG_RD, &stats->tpr, + "Total Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd", + CTLFLAG_RD, &stats->gprc, + "Good Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd", + CTLFLAG_RD, &stats->mprc, + "Multicast Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd", + CTLFLAG_RD, &stats->bprc, + "Broadcast Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64", + CTLFLAG_RD, &stats->prc64, + "64 byte frames received "); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127", + CTLFLAG_RD, &stats->prc127, + "65-127 byte frames received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255", + CTLFLAG_RD, &stats->prc255, + "128-255 byte frames received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511", + CTLFLAG_RD, &stats->prc511, + "256-511 byte frames received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023", + CTLFLAG_RD, &stats->prc1023, + "512-1023 byte frames received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522", + CTLFLAG_RD, &stats->prc1522, + "1023-1522 byte frames received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized", + CTLFLAG_RD, &stats->ruc, + "Receive Undersized"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented", + CTLFLAG_RD, &stats->rfc, + "Fragmented Packets Received "); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized", + CTLFLAG_RD, &stats->roc, + "Oversized Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd", + CTLFLAG_RD, &stats->rjc, + "Received Jabber"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd", + CTLFLAG_RD, &stats->mngprc, + "Management Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd", + CTLFLAG_RD, &stats->mngptc, + "Management Packets Dropped"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs", + CTLFLAG_RD, &stats->xec, + "Checksum Errors"); + + /* Packet Transmission Stats */ + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", + CTLFLAG_RD, &stats->gotc, + "Good Octets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd", + CTLFLAG_RD, &stats->tpt, + "Total Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd", + CTLFLAG_RD, &stats->gptc, + "Good Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd", + CTLFLAG_RD, &stats->bptc, + "Broadcast Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd", + CTLFLAG_RD, &stats->mptc, + "Multicast Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd", + CTLFLAG_RD, &stats->mngptc, + "Management Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64", + CTLFLAG_RD, &stats->ptc64, + "64 byte frames transmitted "); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127", + CTLFLAG_RD, &stats->ptc127, + "65-127 byte frames transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255", + CTLFLAG_RD, &stats->ptc255, + "128-255 byte frames transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511", + CTLFLAG_RD, &stats->ptc511, + "256-511 byte frames transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023", + 
CTLFLAG_RD, &stats->ptc1023, + "512-1023 byte frames transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522", + CTLFLAG_RD, &stats->ptc1522, + "1024-1522 byte frames transmitted"); + + /* FC Stats */ + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_crc", + CTLFLAG_RD, &stats->fccrc, + "FC CRC Errors"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_last", + CTLFLAG_RD, &stats->fclast, + "FC Last Error"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_drpd", + CTLFLAG_RD, &stats->fcoerpdc, + "FCoE Packets Dropped"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd", + CTLFLAG_RD, &stats->fcoeprc, + "FCoE Packets Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd", + CTLFLAG_RD, &stats->fcoeptc, + "FCoE Packets Transmitted"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd", + CTLFLAG_RD, &stats->fcoedwrc, + "FCoE DWords Received"); + SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd", + CTLFLAG_RD, &stats->fcoedwtc, + "FCoE DWords Transmitted"); +} + +/* +** Set flow control using sysctl: +** Flow control values: +** 0 - off +** 1 - rx pause +** 2 - tx pause +** 3 - full +*/ +static int +ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS) +{ + int error, last; + struct adapter *adapter = (struct adapter *) arg1; + + last = adapter->fc; + error = sysctl_handle_int(oidp, &adapter->fc, 0, req); + if ((error) || (req->newptr == NULL)) + return (error); + + /* Don't bother if it's not changed */ + if (adapter->fc == last) + return (0); + + switch (adapter->fc) { + case ixgbe_fc_rx_pause: + case ixgbe_fc_tx_pause: + case ixgbe_fc_full: + adapter->hw.fc.requested_mode = adapter->fc; + break; + case ixgbe_fc_none: + default: + adapter->hw.fc.requested_mode = ixgbe_fc_none; + } + + ixgbe_fc_enable(&adapter->hw, 0); + return error; +} + +static void +ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name, + const char *description, int *limit, int value) +{ + *limit = value; + SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), + OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); +} + +/* +** Control link advertise speed: +** 0 - normal +** 1 - advertise only 1G +** 2 - advertise 100Mb +*/ +static int +ixgbe_set_advertise(SYSCTL_HANDLER_ARGS) +{ + int error = 0; + struct adapter *adapter; + device_t dev; + struct ixgbe_hw *hw; + ixgbe_link_speed speed, last; + + adapter = (struct adapter *) arg1; + dev = adapter->dev; + hw = &adapter->hw; + last = hw->phy.autoneg_advertised; + + error = sysctl_handle_int(oidp, &adapter->advertise, 0, req); + + if ((error) || (adapter->advertise == -1)) + return (error); + + if (!((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->phy.multispeed_fiber))) + return (error); + + if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) { + device_printf(dev, "Set Advertise: 100Mb on X540 only\n"); + return (error); + } + + if (adapter->advertise == 1) + speed = IXGBE_LINK_SPEED_1GB_FULL; + else if (adapter->advertise == 2) + speed = IXGBE_LINK_SPEED_100_FULL; + else + speed = IXGBE_LINK_SPEED_1GB_FULL | + IXGBE_LINK_SPEED_10GB_FULL; + + if (speed == last) /* no change */ + return (error); + + hw->mac.autotry_restart = TRUE; + hw->mac.ops.setup_link(hw, speed, TRUE, TRUE); + + return (error); +} + diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe.h new file mode 100644 index 0000000000..716e7758e5 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe.h @@ -0,0 +1,521 @@ 
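
As the comment on ixgbe_set_ivar() above concedes, the IVAR programming is terse. The following layout note is a worked example derived only from the index arithmetic shown there (illustrative, not driver code): for 82599/X540, each 32-bit IXGBE_IVAR(n) register holds four byte-wide vector entries, the register is selected by entry >> 1, the expression (16 * (entry & 1)) + (8 * type) picks the byte lane, and the byte written is vector | IXGBE_IVAR_ALLOC_VAL.

/*
 * 82599/X540 queue IVAR layout implied by ixgbe_set_ivar():
 *
 *   entry  type     register        bit offset   byte lane
 *     0    0 (RX)   IXGBE_IVAR(0)        0           0
 *     0    1 (TX)   IXGBE_IVAR(0)        8           1
 *     1    0 (RX)   IXGBE_IVAR(0)       16           2
 *     1    1 (TX)   IXGBE_IVAR(0)       24           3
 *     2    0 (RX)   IXGBE_IVAR(1)        0           0
 *     ...
 * The MISC causes (type == -1) go to the separate IXGBE_IVAR_MISC
 * register instead, byte 0 or byte 1 depending on entry & 1.
 */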
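
ixgbe_configure_ivars() above programs EITR with (8000000 / rate) & 0x0FF8 and ixgbe_sysctl_interrupt_rate_handler() decodes the same field back to a rate; both treat bits [11:3] as the interval. A minimal sketch of that round trip, using hypothetical helper names and the driver's u32 typedef (these functions do not exist in the driver):

static inline u32
itr_reg_from_rate(u32 rate)			/* interrupts per second */
{
	/* same expression as ixgbe_configure_ivars(), guarded like the tunable */
	return rate ? (8000000 / rate) & 0x0FF8 : 0;	/* 8000 irq/s -> 0x3E8 */
}

static inline u32
rate_from_itr_reg(u32 reg)
{
	u32 usec = (reg & 0x0FF8) >> 3;		/* 0x3E8 >> 3 = 125 us */
	/* same expression as the interrupt_rate sysctl handler */
	return usec ? 1000000 / usec : 0;	/* 125 us -> 8000 irq/s */
}

Requesting 8000 interrupts per second therefore stores 0x3E8 in EITR and reads back as 8000, which is what keeps the ixgbe_max_interrupt_rate tunable and the per-queue interrupt_rate sysctl consistent.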
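
For completeness, the integer accepted by the ixgbe_set_flowcntl() sysctl above maps onto the ixgbe_fc_* modes exactly as its comment lists (0 off, 1 rx pause, 2 tx pause, 3 full). A small illustrative helper, assuming the enum ixgbe_fc_mode declared in ixgbe_type.h; this function is a sketch and does not exist in the driver:

static enum ixgbe_fc_mode
fc_mode_from_sysctl(int v)
{
	switch (v) {
	case 1:  return ixgbe_fc_rx_pause;	/* honor received PAUSE frames */
	case 2:  return ixgbe_fc_tx_pause;	/* send PAUSE frames only */
	case 3:  return ixgbe_fc_full;		/* both directions */
	default: return ixgbe_fc_none;		/* 0 or anything else: off */
	}
}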
+/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + + +#ifndef _IXGBE_H_ +#define _IXGBE_H_ + + +#include +#include +#if __FreeBSD_version >= 800000 +#include +#endif +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef IXGBE_IEEE1588 +#include +#endif + +#include "ixgbe_api.h" + +/* Tunables */ + +/* + * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the + * number of transmit descriptors allocated by the driver. Increasing this + * value allows the driver to queue more transmits. Each descriptor is 16 + * bytes. Performance tests have show the 2K value to be optimal for top + * performance. + */ +#define DEFAULT_TXD 1024 +#define PERFORM_TXD 2048 +#define MAX_TXD 4096 +#define MIN_TXD 64 + +/* + * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the + * number of receive descriptors allocated for each RX queue. Increasing this + * value allows the driver to buffer more incoming packets. Each descriptor + * is 16 bytes. A receive buffer is also allocated for each descriptor. + * + * Note: with 8 rings and a dual port card, it is possible to bump up + * against the system mbuf pool limit, you can tune nmbclusters + * to adjust for this. + */ +#define DEFAULT_RXD 1024 +#define PERFORM_RXD 2048 +#define MAX_RXD 4096 +#define MIN_RXD 64 + +/* Alignment for rings */ +#define DBA_ALIGN 128 + +/* + * This parameter controls the maximum no of times the driver will loop in + * the isr. 
Minimum Value = 1 + */ +#define MAX_LOOP 10 + +/* + * This is the max watchdog interval, ie. the time that can + * pass between any two TX clean operations, such only happening + * when the TX hardware is functioning. + */ +#define IXGBE_WATCHDOG (10 * hz) + +/* + * This parameters control when the driver calls the routine to reclaim + * transmit descriptors. + */ +#define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8) +#define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32) + +#define IXGBE_MAX_FRAME_SIZE 0x3F00 + +/* Flow control constants */ +#define IXGBE_FC_PAUSE 0xFFFF +#define IXGBE_FC_HI 0x20000 +#define IXGBE_FC_LO 0x10000 + +/* Keep older OS drivers building... */ +#if !defined(SYSCTL_ADD_UQUAD) +#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD +#endif + +/* Defines for printing debug information */ +#define DEBUG_INIT 0 +#define DEBUG_IOCTL 0 +#define DEBUG_HW 0 + +#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") +#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) +#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) +#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") +#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) +#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) +#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") +#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) +#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) + +#define MAX_NUM_MULTICAST_ADDRESSES 128 +#define IXGBE_82598_SCATTER 100 +#define IXGBE_82599_SCATTER 32 +#define MSIX_82598_BAR 3 +#define MSIX_82599_BAR 4 +#define IXGBE_TSO_SIZE 65535 +#define IXGBE_TX_BUFFER_SIZE ((u32) 1514) +#define IXGBE_RX_HDR 128 +#define IXGBE_VFTA_SIZE 128 +#define IXGBE_BR_SIZE 4096 +#define IXGBE_QUEUE_IDLE 0 +#define IXGBE_QUEUE_WORKING 1 +#define IXGBE_QUEUE_HUNG 2 + +/* Offload bits in mbuf flag */ +#if __FreeBSD_version >= 800000 +#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) +#else +#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) +#endif + +/* For 6.X code compatibility */ +#if !defined(ETHER_BPF_MTAP) +#define ETHER_BPF_MTAP BPF_MTAP +#endif + +#if __FreeBSD_version < 700000 +#define CSUM_TSO 0 +#define IFCAP_TSO4 0 +#endif + +/* + * Interrupt Moderation parameters + */ +#define IXGBE_LOW_LATENCY 128 +#define IXGBE_AVE_LATENCY 400 +#define IXGBE_BULK_LATENCY 1200 +#define IXGBE_LINK_ITR 2000 + +/* + ***************************************************************************** + * vendor_info_array + * + * This array contains the list of Subvendor/Subdevice IDs on which the driver + * should load. + * + ***************************************************************************** + */ +typedef struct _ixgbe_vendor_info_t { + unsigned int vendor_id; + unsigned int device_id; + unsigned int subvendor_id; + unsigned int subdevice_id; + unsigned int index; +} ixgbe_vendor_info_t; + + +struct ixgbe_tx_buf { + u32 eop_index; + struct mbuf *m_head; + bus_dmamap_t map; +}; + +struct ixgbe_rx_buf { + struct mbuf *m_head; + struct mbuf *m_pack; + struct mbuf *fmp; + bus_dmamap_t hmap; + bus_dmamap_t pmap; +}; + +/* + * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free. + */ +struct ixgbe_dma_alloc { + bus_addr_t dma_paddr; + caddr_t dma_vaddr; + bus_dma_tag_t dma_tag; + bus_dmamap_t dma_map; + bus_dma_segment_t dma_seg; + bus_size_t dma_size; + int dma_nseg; +}; + +/* +** Driver queue struct: this is the interrupt container +** for the associated tx and rx ring. 
+*/ +struct ix_queue { + struct adapter *adapter; + u32 msix; /* This queue's MSIX vector */ + u32 eims; /* This queue's EIMS bit */ + u32 eitr_setting; + struct resource *res; + void *tag; + struct tx_ring *txr; + struct rx_ring *rxr; + struct task que_task; + struct taskqueue *tq; + u64 irqs; +}; + +/* + * The transmit ring, one per queue + */ +struct tx_ring { + struct adapter *adapter; + struct mtx tx_mtx; + u32 me; + int queue_status; + int watchdog_time; + union ixgbe_adv_tx_desc *tx_base; + struct ixgbe_dma_alloc txdma; + u32 next_avail_desc; + u32 next_to_clean; + struct ixgbe_tx_buf *tx_buffers; + volatile u16 tx_avail; + u32 txd_cmd; + bus_dma_tag_t txtag; + char mtx_name[16]; +#if __FreeBSD_version >= 800000 + struct buf_ring *br; +#endif +#ifdef IXGBE_FDIR + u16 atr_sample; + u16 atr_count; +#endif + u32 bytes; /* used for AIM */ + u32 packets; + /* Soft Stats */ + u64 no_desc_avail; + u64 total_packets; +}; + + +/* + * The Receive ring, one per rx queue + */ +struct rx_ring { + struct adapter *adapter; + struct mtx rx_mtx; + u32 me; + union ixgbe_adv_rx_desc *rx_base; + struct ixgbe_dma_alloc rxdma; + struct lro_ctrl lro; + bool lro_enabled; + bool hdr_split; + bool hw_rsc; + bool discard; + u32 next_to_refresh; + u32 next_to_check; + char mtx_name[16]; + struct ixgbe_rx_buf *rx_buffers; + bus_dma_tag_t htag; + bus_dma_tag_t ptag; + + u32 bytes; /* Used for AIM calc */ + u32 packets; + + /* Soft stats */ + u64 rx_irq; + u64 rx_split_packets; + u64 rx_packets; + u64 rx_bytes; + u64 rx_discarded; + u64 rsc_num; +#ifdef IXGBE_FDIR + u64 flm; +#endif +}; + +/* Our adapter structure */ +struct adapter { + struct ifnet *ifp; + struct ixgbe_hw hw; + + struct ixgbe_osdep osdep; + struct device *dev; + + struct resource *pci_mem; + struct resource *msix_mem; + + /* + * Interrupt resources: this set is + * either used for legacy, or for Link + * when doing MSIX + */ + void *tag; + struct resource *res; + + struct ifmedia media; + struct callout timer; + int msix; + int if_flags; + + struct mtx core_mtx; + + eventhandler_tag vlan_attach; + eventhandler_tag vlan_detach; + + u16 num_vlans; + u16 num_queues; + + /* + ** Shadow VFTA table, this is needed because + ** the real vlan filter table gets cleared during + ** a soft reset and the driver needs to be able + ** to repopulate it. + */ + u32 shadow_vfta[IXGBE_VFTA_SIZE]; + + /* Info about the interface */ + u32 optics; + u32 fc; /* local flow ctrl setting */ + int advertise; /* link speeds */ + bool link_active; + u16 max_frame_size; + u16 num_segs; + u32 link_speed; + bool link_up; + u32 linkvec; + + /* Mbuf cluster size */ + u32 rx_mbuf_sz; + + /* Support for pluggable optics */ + bool sfp_probe; + struct task link_task; /* Link tasklet */ + struct task mod_task; /* SFP tasklet */ + struct task msf_task; /* Multispeed Fiber */ +#ifdef IXGBE_FDIR + int fdir_reinit; + struct task fdir_task; +#endif + struct taskqueue *tq; + + /* + ** Queues: + ** This is the irq holder, it has + ** and RX/TX pair or rings associated + ** with it. + */ + struct ix_queue *queues; + + /* + * Transmit rings: + * Allocated at run time, an array of rings. + */ + struct tx_ring *tx_rings; + int num_tx_desc; + + /* + * Receive rings: + * Allocated at run time, an array of rings. 
+ */ + struct rx_ring *rx_rings; + int num_rx_desc; + u64 que_mask; + u32 rx_process_limit; + + /* Multicast array memory */ + u8 *mta; + + /* Misc stats maintained by the driver */ + unsigned long dropped_pkts; + unsigned long mbuf_defrag_failed; + unsigned long mbuf_header_failed; + unsigned long mbuf_packet_failed; + unsigned long no_tx_map_avail; + unsigned long no_tx_dma_setup; + unsigned long watchdog_events; + unsigned long tso_tx; + unsigned long link_irq; + + struct ixgbe_hw_stats stats; +}; + +/* Precision Time Sync (IEEE 1588) defines */ +#define ETHERTYPE_IEEE1588 0x88F7 +#define PICOSECS_PER_TICK 20833 +#define TSYNC_UDP_PORT 319 /* UDP port for the protocol */ +#define IXGBE_ADVTXD_TSTAMP 0x00080000 + + +#define IXGBE_CORE_LOCK_INIT(_sc, _name) \ + mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF) +#define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) +#define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) +#define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) +#define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) +#define IXGBE_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) +#define IXGBE_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) +#define IXGBE_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) +#define IXGBE_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) +#define IXGBE_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) +#define IXGBE_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) +#define IXGBE_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) +#define IXGBE_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) + + +static inline bool +ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->phy.type) { + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + return TRUE; + default: + return FALSE; + } +} + +/* Workaround to make 8.0 buildable */ +#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504 +static __inline int +drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br) +{ +#ifdef ALTQ + if (ALTQ_IS_ENABLED(&ifp->if_snd)) + return (1); +#endif + return (!buf_ring_empty(br)); +} +#endif + +/* +** Find the number of unrefreshed RX descriptors +*/ +static inline u16 +ixgbe_rx_unrefreshed(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + + if (rxr->next_to_check > rxr->next_to_refresh) + return (rxr->next_to_check - rxr->next_to_refresh - 1); + else + return ((adapter->num_rx_desc + rxr->next_to_check) - + rxr->next_to_refresh - 1); +} + +#endif /* _IXGBE_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c new file mode 100644 index 0000000000..6a1af54c9c --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c @@ -0,0 +1,1402 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw); +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num); +static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete); +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw); +void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw); +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw); +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw); +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw); +static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy); + +/** + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure + * + * The defaults for 82598 should be in the range of 50us to 50ms, + * however the hardware default for these parts is 500us to 1ms which is less + * than the 10ms recommended by the pci-e spec. To address this we need to + * increase the value to either 10ms to 250ms for capability version 1 config, + * or 16ms to 55ms for version 2. 
+ **/ +void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) +{ + u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); + u16 pcie_devctl2; + + /* only take action if timeout value is defaulted to 0 */ + if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) + goto out; + + /* + * if capababilities version is type 1 we can write the + * timeout of 10ms to 250ms through the GCR register + */ + if (!(gcr & IXGBE_GCR_CAP_VER2)) { + gcr |= IXGBE_GCR_CMPL_TMOUT_10ms; + goto out; + } + + /* + * for version 2 capabilities we need to write the config space + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ + pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); + pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; + IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); +out: + /* disable completion timeout resend */ + gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; + IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); +} + +/** + * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. + **/ +u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw) +{ + u32 msix_count = 18; + + DEBUGFUNC("ixgbe_get_pcie_msix_count_82598"); + + if (hw->mac.msix_vectors_from_pcie) { + msix_count = IXGBE_READ_PCIE_WORD(hw, + IXGBE_PCIE_MSIX_82598_CAPS); + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW, so increment to give + * proper value */ + msix_count++; + } + return msix_count; +} + +/** + * ixgbe_init_ops_82598 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for 82598. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_82598"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.init = &ixgbe_init_phy_ops_82598; + + /* MAC */ + mac->ops.start_hw = &ixgbe_start_hw_82598; + mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598; + mac->ops.reset_hw = &ixgbe_reset_hw_82598; + mac->ops.get_media_type = &ixgbe_get_media_type_82598; + mac->ops.get_supported_physical_layer = + &ixgbe_get_supported_physical_layer_82598; + mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598; + mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598; + mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = &ixgbe_set_vmdq_82598; + mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598; + mac->ops.set_vfta = &ixgbe_set_vfta_82598; + mac->ops.clear_vfta = &ixgbe_clear_vfta_82598; + + /* Flow Control */ + mac->ops.fc_enable = &ixgbe_fc_enable_82598; + + mac->mcft_size = 128; + mac->vft_size = 128; + mac->num_rar_entries = 16; + mac->rx_pb_size = 512; + mac->max_tx_queues = 32; + mac->max_rx_queues = 64; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); + + /* SFP+ Module */ + phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598; + + /* Link */ + mac->ops.check_link = &ixgbe_check_mac_link_82598; + mac->ops.setup_link = &ixgbe_setup_mac_link_82598; + mac->ops.flap_tx_laser = NULL; + mac->ops.get_link_capabilities = + &ixgbe_get_link_capabilities_82598; + mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = 
NULL; + + return ret_val; +} + +/** + * ixgbe_init_phy_ops_82598 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. + * + **/ +s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = IXGBE_SUCCESS; + u16 list_offset, data_offset; + + DEBUGFUNC("ixgbe_init_phy_ops_82598"); + + /* Identify the PHY */ + phy->ops.identify(hw); + + /* Overwrite the link function pointers if copper PHY */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82598; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_generic; + } + + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + &ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_nl: + phy->ops.reset = &ixgbe_reset_phy_nl; + + /* Call SFP+ identify routine to get the SFP+ module type */ + ret_val = phy->ops.identify_sfp(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Check to see if SFP+ module is supported */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, + &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + break; + default: + break; + } + +out: + return ret_val; +} + +/** + * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function. + * Disables relaxed ordering Then set pcie completion timeout + * + **/ +s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) +{ + u32 regval; + u32 i; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82598"); + + ret_val = ixgbe_start_hw_generic(hw); + + /* Disable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); + } + + for (i = 0; ((i < hw->mac.max_rx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | + IXGBE_DCA_RXCTRL_DESC_HSRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + + /* set the completion timeout for interface */ + if (ret_val == IXGBE_SUCCESS) + ixgbe_set_pcie_completion_timeout(hw); + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_82598 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. + **/ +static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_SUCCESS; + u32 autoc = 0; + + DEBUGFUNC("ixgbe_get_link_capabilities_82598"); + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not been + * stored, use the current register value. 
+ */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = FALSE; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *autoneg = FALSE; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = TRUE; + break; + + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = TRUE; + break; + + default: + status = IXGBE_ERR_LINK_SETUP; + break; + } + + return status; +} + +/** + * ixgbe_get_media_type_82598 - Determines media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82598"); + + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + media_type = ixgbe_media_type_copper; + goto out; + default: + break; + } + + /* Media type for I82598 is based on device ID */ + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + media_type = ixgbe_media_type_cx4; + break; + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * ixgbe_fc_enable_82598 - Enable flow control + * @hw: pointer to hardware structure + * @packetbuf_num: packet buffer number (0-7) + * + * Enable flow control according to the current settings. + **/ +s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 fctrl_reg; + u32 rmcs_reg; + u32 reg; + u32 link_speed = 0; + bool link_up; + + DEBUGFUNC("ixgbe_fc_enable_82598"); + + /* + * On 82598 having Rx FC on causes resets while doing 1G + * so if it's on turn it off once we know link_speed. For + * more details see 82598 Specification update. 
+ */ + hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE); + if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) { + switch (hw->fc.requested_mode) { + case ixgbe_fc_full: + hw->fc.requested_mode = ixgbe_fc_tx_pause; + break; + case ixgbe_fc_rx_pause: + hw->fc.requested_mode = ixgbe_fc_none; + break; + default: + /* no change */ + break; + } + } + + /* Negotiate the fc mode to use */ + ret_val = ixgbe_fc_autoneg(hw); + if (ret_val == IXGBE_ERR_FLOW_CONTROL) + goto out; + + /* Disable any previous flow control settings */ + fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); + + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + fctrl_reg |= IXGBE_FCTRL_RFCE; + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + fctrl_reg |= IXGBE_FCTRL_DPF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + if (hw->fc.current_mode & ixgbe_fc_tx_pause) { + reg = hw->fc.low_water << 6; + if (hw->fc.send_xon) + reg |= IXGBE_FCRTL_XONE; + + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg); + + reg = hw->fc.high_water[packetbuf_num] << 6; + reg |= IXGBE_FCRTH_FCEN; + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg); + } + + /* Configure pause time (2 TCs per register) */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); + if ((packetbuf_num & 1) == 0) + reg = (reg & 0xFFFF0000) | hw->fc.pause_time; + else + reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); + + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + +out: + return ret_val; +} + +/** + * ixgbe_start_mac_link_82598 - Configures MAC link settings + * @hw: pointer to hardware structure + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
+ **/ +static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_mac_link_82598"); + + /* Restart link */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + DEBUGOUT("Autonegotiation did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msec_delay(50); + + return status; +} + +/** + * ixgbe_validate_link_ready - Function looks for phy link + * @hw: pointer to hardware structure + * + * Function indicates success when phy link is available. If phy is not ready + * within 5 seconds of MAC indicating link, the function returns error. + **/ +static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) +{ + u32 timeout; + u16 an_reg; + + if (hw->device_id != IXGBE_DEV_ID_82598AT2) + return IXGBE_SUCCESS; + + for (timeout = 0; + timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); + + if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && + (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) + break; + + msec_delay(100); + } + + if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { + DEBUGOUT("Link was indicated but link is down\n"); + return IXGBE_ERR_LINK_SETUP; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_mac_link_82598 - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: TRUE is link is up, FALSE otherwise + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete) +{ + u32 links_reg; + u32 i; + u16 link_reg, adapt_comp_reg; + + DEBUGFUNC("ixgbe_check_mac_link_82598"); + + /* + * SERDES PHY requires us to read link status from undocumented + * register 0xC79F. Bit 0 set indicates link is up/ready; clear + * indicates link down. OxC00C is read to check that the XAUI lanes + * are active. Bit 0 clear indicates active; set indicates inactive. 
+ */ + if (hw->phy.type == ixgbe_phy_nl) { + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, + &adapt_comp_reg); + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if ((link_reg & 1) && + ((adapt_comp_reg & 1) == 0)) { + *link_up = TRUE; + break; + } else { + *link_up = FALSE; + } + msec_delay(100); + hw->phy.ops.read_reg(hw, 0xC79F, + IXGBE_TWINAX_DEV, + &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, + IXGBE_TWINAX_DEV, + &adapt_comp_reg); + } + } else { + if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0)) + *link_up = TRUE; + else + *link_up = FALSE; + } + + if (*link_up == FALSE) + goto out; + } + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = TRUE; + break; + } else { + *link_up = FALSE; + } + msec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = TRUE; + else + *link_up = FALSE; + } + + if (links_reg & IXGBE_LINKS_SPEED) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + + if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) && + (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS)) + *link_up = FALSE; + + /* if link is down, zero out the current_mode */ + if (*link_up == FALSE) { + hw->fc.current_mode = ixgbe_fc_none; + hw->fc.fc_was_autonegged = FALSE; + } +out: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_mac_link_82598 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc = curr_autoc; + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + + DEBUGFUNC("ixgbe_setup_mac_link_82598"); + + /* Check to see if speed passed in is supported. 
*/ + ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) + status = IXGBE_ERR_LINK_SETUP; + + /* Set KX4/KX support according to speed requested */ + else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) { + autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK; + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + if (autoc != curr_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + } + + if (status == IXGBE_SUCCESS) { + /* + * Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new + * stored values + */ + status = ixgbe_start_mac_link_82598(hw, + autoneg_wait_to_complete); + } + + return status; +} + + +/** + * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * @autoneg_wait_to_complete: TRUE if waiting is needed to complete + * + * Sets the link speed in the AUTOC register in the MAC and restarts link. + **/ +static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status; + + DEBUGFUNC("ixgbe_setup_copper_link_82598"); + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82598 - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performing a PHY reset, and performing a link (MAC) + * reset. + **/ +static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + s32 phy_status = IXGBE_SUCCESS; + u32 ctrl; + u32 gheccr; + u32 i; + u32 autoc; + u8 analog_val; + + DEBUGFUNC("ixgbe_reset_hw_82598"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* + * Power up the Atlas Tx lanes if they are currently powered down. + * Atlas Tx lanes are powered down for MAC loopback tests, but + * they are not automatically restored on reset. 
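+ * When the power-down bit is found set, each of the four Atlas analog
+ * registers (LPBK, 10G, 1G and AN) is read back, its Tx power-down
+ * bits are cleared and the value is rewritten through the
+ * read/write_analog_reg8 MAC ops.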
+ */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); + if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { + /* Enable Tx Atlas so packets can be transmitted again */ + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, + analog_val); + + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + &analog_val); + analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, + analog_val); + } + + /* Reset PHY */ + if (hw->phy.reset_disable == FALSE) { + /* PHY ops must be identified and initialized prior to reset */ + + /* Init PHY and function pointers, perform SFP setup */ + phy_status = hw->phy.ops.init(hw); + if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT) + goto mac_reset_top; + + hw->phy.ops.reset(hw); + } + +mac_reset_top: + /* + * Issue global reset to the MAC. This needs to be a SW reset. + * If link reset is used, it might reset the MAC when mng is using it + */ + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST; + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST)) + break; + } + if (ctrl & IXGBE_CTRL_RST) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + + msec_delay(50); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR); + gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6)); + IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr); + + /* + * Store the original AUTOC value if it has not been + * stored off yet. Otherwise restore the stored original + * AUTOC value since the reset operation sets back to deaults. 
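+ * In practice the first reset after power-up captures the
+ * EEPROM-programmed link settings in orig_autoc, and every later
+ * reset rewrites AUTOC from that stored copy whenever the register no
+ * longer matches it.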
+ */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + if (hw->mac.orig_link_settings_stored == FALSE) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_link_settings_stored = TRUE; + } else if (autoc != hw->mac.orig_autoc) { + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc); + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table + */ + hw->mac.ops.init_rx_addrs(hw); + +reset_hw_out: + if (phy_status != IXGBE_SUCCESS) + status = phy_status; + + return status; +} + +/** + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq set index + **/ +s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_set_vmdq_82598"); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + rar_high &= ~IXGBE_RAH_VIND_MASK; + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq clear index (not used in 82598, but elsewhere) + **/ +static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + UNREFERENCED_1PARAMETER(vmdq); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + if (rar_high & IXGBE_RAH_VIND_MASK) { + rar_high &= ~IXGBE_RAH_VIND_MASK; + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vfta_82598 - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + * Turn on/off specified VLAN in the VLAN filter table. 
+ **/ +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + u32 regindex; + u32 bitindex; + u32 bits; + u32 vftabyte; + + DEBUGFUNC("ixgbe_set_vfta_82598"); + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* Determine 32-bit word position in array */ + regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ + + /* Determine the location of the (VMD) queue index */ + vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ + bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ + + /* Set the nibble for VMD queue index */ + bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); + bits &= (~(0x0F << bitindex)); + bits |= (vind << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); + + /* Determine the location of the bit for this VLAN id */ + bitindex = vlan & 0x1F; /* lower five bits */ + + bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + if (vlan_on) + /* Turn on this VLAN id */ + bits |= (1 << bitindex); + else + /* Turn off this VLAN id */ + bits &= ~(1 << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_vfta_82598 - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) +{ + u32 offset; + u32 vlanbyte; + + DEBUGFUNC("ixgbe_clear_vfta_82598"); + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), + 0); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Atlas analog register specified. + **/ +s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 atlas_ctl; + + DEBUGFUNC("ixgbe_read_analog_reg8_82598"); + + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + *val = (u8)atlas_ctl; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 atlas_ctl; + + DEBUGFUNC("ixgbe_write_analog_reg8_82598"); + + atlas_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface. + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
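+ * The access is proxied through the NetLogic PHY: the SFP device
+ * address and byte offset are packed into a single MDIO write (offset
+ * in the low byte, device address in the high byte, plus the read
+ * flag), the status register is polled until the transaction passes,
+ * and the EEPROM byte is returned in the upper 8 bits of the data
+ * register.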
+ **/ +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + s32 status = IXGBE_SUCCESS; + u16 sfp_addr = 0; + u16 sfp_data = 0; + u16 sfp_stat = 0; + u32 i; + + DEBUGFUNC("ixgbe_read_i2c_eeprom_82598"); + + if (hw->phy.type == ixgbe_phy_nl) { + /* + * NetLogic phy SDA/SCL registers are at addresses 0xC30A to + * 0xC30D. These registers are used to talk to the SFP+ + * module's EEPROM through the SDA/SCL (I2C) interface. + */ + sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset; + sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); + hw->phy.ops.write_reg(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + sfp_addr); + + /* Poll status */ + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &sfp_stat); + sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) + break; + msec_delay(10); + } + + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { + DEBUGOUT("EEPROM read did not pass.\n"); + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* Read data */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); + + *eeprom_data = (u8)(sfp_data >> 8); + } else { + status = IXGBE_ERR_PHY; + goto out; + } + +out: + return status; +} + +/** + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_82598"); + + hw->phy.ops.identify(hw); + + /* Copper PHY must be checked before AUTOC LMS to determine correct + * physical layer because 10GBase-T PHYs use LMS = KX4/KX */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; + else + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else /* XAUI */ + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + case IXGBE_AUTOC_LMS_KX4_AN: + case IXGBE_AUTOC_LMS_KX4_AN_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + break; + default: + break; + } + + if (hw->phy.type == ixgbe_phy_nl) { + 
hw->phy.ops.identify_sfp(hw); + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_da_cu: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_sfp_type_sr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case ixgbe_sfp_type_lr: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + break; + } + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + break; + case IXGBE_DEV_ID_82598EB_XF_LR: + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + break; + default: + break; + } + +out: + return physical_layer; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple + * port devices. + * @hw: pointer to the HW structure + * + * Calls common function and corrects issue with some single port devices + * that enable LAN1 but not LAN0. + **/ +void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u16 pci_gen = 0; + u16 pci_ctrl2 = 0; + + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598"); + + ixgbe_set_lan_id_multi_port_pcie(hw); + + /* check if LAN0 is disabled */ + hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen); + if ((pci_gen != 0) && (pci_gen != 0xFFFF)) { + + hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2); + + /* if LAN0 is completely disabled force function to 0 */ + if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) && + !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) { + + bus->func = 0; + } + } +} + +/** + * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw) +{ + u32 regval; + u32 i; + + DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598"); + + /* Enable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); + regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); + } + + for (i = 0; ((i < hw->mac.max_rx_queues) && + (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN | + IXGBE_DCA_RXCTRL_DESC_HSRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + +} + +/** + * ixgbe_set_rxpba_82598 - Initialize RX packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy) +{ + u32 rxpktsize = IXGBE_RXPBSIZE_64KB; + u8 i = 0; + UNREFERENCED_1PARAMETER(headroom); + + if (!num_pb) + return; + + /* Setup Rx packet buffer sizes */ + switch (strategy) { + case PBA_STRATEGY_WEIGHTED: + /* Setup the first four at 80KB */ + rxpktsize = IXGBE_RXPBSIZE_80KB; + for (; i < 4; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Setup the last four at 48KB...don't re-init i */ + rxpktsize = IXGBE_RXPBSIZE_48KB; + /* Fall Through */ + case PBA_STRATEGY_EQUAL: + default: + /* Divide the remaining Rx packet buffer 
evenly among the TCs */ + for (; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + } + + /* Setup Tx packet buffer sizes */ + for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); + + return; +} diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c new file mode 100644 index 0000000000..59639d4e5f --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c @@ -0,0 +1,2281 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete); +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); +static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); +u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); +static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data); +static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); + +void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); + + /* enable the laser control functions for SFP+ fiber */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { + mac->ops.disable_tx_laser = + &ixgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = + &ixgbe_enable_tx_laser_multispeed_fiber; + mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; + + } else { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + } + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ + mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; + } else { + if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && + (hw->phy.smart_speed == ixgbe_smart_speed_auto || + hw->phy.smart_speed == ixgbe_smart_speed_on) && + !ixgbe_verify_lesm_fw_enabled_82599(hw)) { + mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; + } else { + mac->ops.setup_link = &ixgbe_setup_mac_link_82599; + } + } +} + +/** + * ixgbe_init_phy_ops_82599 - PHY/SFP specific init + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be + * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
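+ * For 82599 this is also where ixgbe_init_mac_link_ops_82599() picks
+ * the multispeed-fiber or SmartSpeed setup_link handler, and where a
+ * copper PHY is redirected to the copper setup_link and generic
+ * copper link-capability routines.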
+ * + **/ +s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_init_phy_ops_82599"); + + /* Identify the PHY or SFP module */ + ret_val = phy->ops.identify(hw); + if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto init_phy_ops_out; + + /* Setup function pointers based on detected SFP module and speeds */ + ixgbe_init_mac_link_ops_82599(hw); + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) + hw->phy.ops.reset = NULL; + + /* If copper media, overwrite with copper function pointers */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { + mac->ops.setup_link = &ixgbe_setup_copper_link_82599; + mac->ops.get_link_capabilities = + &ixgbe_get_copper_link_capabilities_generic; + } + + /* Set necessary function pointers based on phy type */ + switch (hw->phy.type) { + case ixgbe_phy_tn: + phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; + phy->ops.check_link = &ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = + &ixgbe_get_phy_firmware_version_tnx; + break; + default: + break; + } +init_phy_ops_out: + return ret_val; +} + +s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 reg_anlp1 = 0; + u32 i = 0; + u16 list_offset, data_offset, data_value; + + DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); + + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { + ixgbe_init_mac_link_ops_82599(hw); + + hw->phy.ops.reset = NULL; + + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) + goto setup_sfp_out; + + /* PHY config will finish before releasing the semaphore */ + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); + if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto setup_sfp_out; + } + + hw->eeprom.ops.read(hw, ++data_offset, &data_value); + while (data_value != 0xffff) { + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); + IXGBE_WRITE_FLUSH(hw); + hw->eeprom.ops.read(hw, ++data_offset, &data_value); + } + + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); + /* Delay obtaining semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); + + /* Now restart DSP by setting Restart_AN and clearing LMS */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, + IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | + IXGBE_AUTOC_AN_RESTART)); + + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { + msec_delay(4); + reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { + DEBUGOUT("sfp module setup not complete\n"); + ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + goto setup_sfp_out; + } + + /* Restart DSP by setting Restart_AN and return to SFI mode */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, + IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | + IXGBE_AUTOC_AN_RESTART)); + } + +setup_sfp_out: + return ret_val; +} + +/** + * ixgbe_init_ops_82599 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for 82599. + * Does not touch the hardware. 
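+ * The resulting ops table mixes 82599-specific handlers (reset_hw,
+ * start_hw, analog register access, SFP setup) with generic RAR,
+ * VMDq and VFTA helpers, and records the 82599 sizing: 128 RAR
+ * entries, 128-entry multicast and VLAN filter tables and 128 Tx/Rx
+ * queues.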
+ **/ + +s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_82599"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + /* PHY */ + phy->ops.identify = &ixgbe_identify_phy_82599; + phy->ops.init = &ixgbe_init_phy_ops_82599; + + /* MAC */ + mac->ops.reset_hw = &ixgbe_reset_hw_82599; + mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2; + mac->ops.get_media_type = &ixgbe_get_media_type_82599; + mac->ops.get_supported_physical_layer = + &ixgbe_get_supported_physical_layer_82599; + mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599; + mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599; + mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599; + mac->ops.start_hw = &ixgbe_start_hw_82599; + mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = &ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = &ixgbe_set_vmdq_generic; + mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = &ixgbe_set_vfta_generic; + mac->ops.clear_vfta = &ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic; + mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599; + mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599; + mac->ops.check_link = &ixgbe_check_mac_link_generic; + mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic; + ixgbe_init_mac_link_ops_82599(hw); + + mac->mcft_size = 128; + mac->vft_size = 128; + mac->num_rar_entries = 128; + mac->rx_pb_size = 512; + mac->max_tx_queues = 128; + mac->max_rx_queues = 128; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) & + IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE; + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; + + /* EEPROM */ + eeprom->ops.read = &ixgbe_read_eeprom_82599; + eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic; + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_82599 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @negotiation: TRUE when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. + **/ +s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *negotiation) +{ + s32 status = IXGBE_SUCCESS; + u32 autoc = 0; + + DEBUGFUNC("ixgbe_get_link_capabilities_82599"); + + + /* Check if 1G SFP module. */ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = TRUE; + goto out; + } + + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. 
If AUTOC value has not + * been stored, use the current register values. + */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; + else + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = FALSE; + break; + + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *negotiation = FALSE; + break; + + case IXGBE_AUTOC_LMS_1G_AN: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = TRUE; + break; + + case IXGBE_AUTOC_LMS_10G_SERIAL: + *speed = IXGBE_LINK_SPEED_10GB_FULL; + *negotiation = FALSE; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + *speed = IXGBE_LINK_SPEED_UNKNOWN; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = TRUE; + break; + + case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: + *speed = IXGBE_LINK_SPEED_100_FULL; + if (autoc & IXGBE_AUTOC_KR_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (autoc & IXGBE_AUTOC_KX_SUPP) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = TRUE; + break; + + case IXGBE_AUTOC_LMS_SGMII_1G_100M: + *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; + *negotiation = FALSE; + break; + + default: + status = IXGBE_ERR_LINK_SETUP; + goto out; + break; + } + + if (hw->phy.multispeed_fiber) { + *speed |= IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + *negotiation = TRUE; + } + +out: + return status; +} + +/** + * ixgbe_get_media_type_82599 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) +{ + enum ixgbe_media_type media_type; + + DEBUGFUNC("ixgbe_get_media_type_82599"); + + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: + case ixgbe_phy_tn: + media_type = ixgbe_media_type_copper; + goto out; + default: + break; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_XAUI_LOM: + /* Default device ID is mezzanine card KX/KX4 */ + media_type = ixgbe_media_type_backplane; + break; + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599EN_SFP: + media_type = ixgbe_media_type_fiber; + break; + case IXGBE_DEV_ID_82599_CX4: + media_type = ixgbe_media_type_cx4; + break; + case IXGBE_DEV_ID_82599_T3_LOM: + media_type = ixgbe_media_type_copper; + break; + default: + media_type = ixgbe_media_type_unknown; + break; + } +out: + return media_type; +} + +/** + * ixgbe_start_mac_link_82599 - Setup MAC link settings + * @hw: pointer to hardware structure + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. 
+ **/ +s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) +{ + u32 autoc_reg; + u32 links_reg; + u32 i; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_mac_link_82599"); + + + /* Restart link */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + (autoc_reg & IXGBE_AUTOC_LMS_MASK) == + IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + links_reg = 0; /* Just in case Autoneg time = 0 */ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; + DEBUGOUT("Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msec_delay(50); + + return status; +} + +/** + * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. + **/ +void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Disable tx laser; allow 100us to go dark per spec */ + esdp_reg |= IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + usec_delay(100); +} + +/** + * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser + * @hw: pointer to hardware structure + * + * The base drivers may require better control over SFP+ module + * PHY states. This includes selectively turning on the Tx + * laser on the PHY, effectively starting physical link. + **/ +void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + + /* Enable tx laser; allow 100ms to light up */ + esdp_reg &= ~IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + msec_delay(100); +} + +/** + * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to TRUE to indicate that we need to + * initiate a new autotry session with the link partner. To do + * so, we set the speed then disable and re-enable the tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with TRUE clause 37 autoneg, which also + * involves a loss of signal. + **/ +void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) +{ + DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); + + if (hw->mac.autotry_restart) { + ixgbe_disable_tx_laser_multispeed_fiber(hw); + ixgbe_enable_tx_laser_multispeed_fiber(hw); + hw->mac.autotry_restart = FALSE; + } +} + +/** + * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. 
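+ * Speeds are tried in software from highest to lowest: the module
+ * rate-select pin (SDP5) is driven high for 10 Gb first, the MAC link
+ * is configured and the Tx laser flapped, and link is polled for
+ * roughly 500 ms before falling back to 1 Gb (SDP5 driven low); if
+ * nothing links, the routine calls itself again with only the highest
+ * speed that was attempted.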
+ **/ +s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; + u32 speedcnt = 0; + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + u32 i = 0; + bool link_up = FALSE; + bool negotiation; + + DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); + + /* Mask off requested but non-supported speeds */ + status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation); + if (status != IXGBE_SUCCESS) + return status; + + speed &= link_speed; + + /* + * Try each speed one by one, highest priority first. We do this in + * software because 10gb fiber doesn't support speed autonegotiation. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + speedcnt++; + highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + + /* Allow module to change analog characteristics (1G->10G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_10GB_FULL, + autoneg, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* + * Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted. 82599 uses the same timing for 10g SFI. + */ + for (i = 0; i < 5; i++) { + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, + &link_up, FALSE); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + speedcnt++; + if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) + highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If we already have link at this speed, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); + if (status != IXGBE_SUCCESS) + return status; + + if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) + goto out; + + /* Set the module link speed */ + esdp_reg &= ~IXGBE_ESDP_SDP5; + esdp_reg |= IXGBE_ESDP_SDP5_DIR; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); + + /* Allow module to change analog characteristics (10G->1G) */ + msec_delay(40); + + status = ixgbe_setup_mac_link_82599(hw, + IXGBE_LINK_SPEED_1GB_FULL, + autoneg, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + return status; + + /* Flap the tx laser if it has not already been done */ + ixgbe_flap_tx_laser(hw); + + /* Wait for the link partner to also set speed */ + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); + if (status != IXGBE_SUCCESS) + return status; + + if (link_up) + goto out; + } + + /* + * We didn't get link. Configure back to the highest speed we tried, + * (if there was more than one). We call ourselves back with just the + * single highest speed that the user requested. 
+ */ + if (speedcnt > 1) + status = ixgbe_setup_mac_link_multispeed_fiber(hw, + highest_link_speed, autoneg, autoneg_wait_to_complete); + +out: + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + return status; +} + +/** + * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * + * Implements the Intel SmartSpeed algorithm. + **/ +s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 i, j; + bool link_up = FALSE; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + + DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); + + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + /* + * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the + * autoneg advertisement if link is unable to be established at the + * highest negotiated rate. This can sometimes happen due to integrity + * issues with the physical media connection. + */ + + /* First, try to get link with full advertisement */ + hw->phy.smart_speed_active = FALSE; + for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + goto out; + + /* + * Wait for the controller to acquire link. Per IEEE 802.3ap, + * Section 73.10.2, we may have to wait up to 500ms if KR is + * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per + * Table 9 in the AN MAS. + */ + for (i = 0; i < 5; i++) { + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, + FALSE); + if (status != IXGBE_SUCCESS) + goto out; + + if (link_up) + goto out; + } + } + + /* + * We didn't get link. If we advertised KR plus one of KX4/KX + * (or BX4/BX), then disable KR and try again. + */ + if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || + ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) + goto out; + + /* Turn SmartSpeed on to disable KR support */ + hw->phy.smart_speed_active = TRUE; + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + autoneg_wait_to_complete); + if (status != IXGBE_SUCCESS) + goto out; + + /* + * Wait for the controller to acquire link. 600ms will allow for + * the AN link_fail_inhibit_timer as well for multiple cycles of + * parallel detect, both 10g and 1g. This allows for the maximum + * connect attempts as defined in the AN MAS table 73-7. + */ + for (i = 0; i < 6; i++) { + msec_delay(100); + + /* If we have link, just jump out */ + status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE); + if (status != IXGBE_SUCCESS) + goto out; + + if (link_up) + goto out; + } + + /* We didn't get link. 
Turn SmartSpeed back off. */ + hw->phy.smart_speed_active = FALSE; + status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, + autoneg_wait_to_complete); + +out: + if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) + DEBUGOUT("Smartspeed has downgraded the link speed " + "from the maximum advertised\n"); + return status; +} + +/** + * ixgbe_setup_mac_link_82599 - Set MAC link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. + **/ +s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status = IXGBE_SUCCESS; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 start_autoc = autoc; + u32 orig_autoc = 0; + u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 links_reg; + u32 i; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + + DEBUGFUNC("ixgbe_setup_mac_link_82599"); + + /* Check to see if speed passed in is supported. */ + status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); + if (status != IXGBE_SUCCESS) + goto out; + + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) { + status = IXGBE_ERR_LINK_SETUP; + goto out; + } + + /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ + if (hw->mac.orig_link_settings_stored) + orig_autoc = hw->mac.orig_autoc; + else + orig_autoc = autoc; + + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + /* Set KX4/KX/KR support according to speed requested */ + autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) + autoc |= IXGBE_AUTOC_KX4_SUPP; + if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && + (hw->phy.smart_speed_active == FALSE)) + autoc |= IXGBE_AUTOC_KR_SUPP; + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + autoc |= IXGBE_AUTOC_KX_SUPP; + } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && + (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || + link_mode == IXGBE_AUTOC_LMS_1G_AN)) { + /* Switch from 1G SFI to 10G SFI if requested */ + if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && + (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { + autoc &= ~IXGBE_AUTOC_LMS_MASK; + autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; + } + } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && + (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { + /* Switch from 10G SFI to 1G SFI if requested */ + if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && + (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { + autoc &= ~IXGBE_AUTOC_LMS_MASK; + if (autoneg) + autoc |= IXGBE_AUTOC_LMS_1G_AN; + else + autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; + } + } + + if (autoc != start_autoc) { + /* Restart link */ + autoc |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + + /* Only poll for autoneg to complete if specified to do so */ + if (autoneg_wait_to_complete) { + if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || + link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { + links_reg = 0; /*Just in case Autoneg time=0*/ + for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) 
{ + links_reg = + IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; + msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = + IXGBE_ERR_AUTONEG_NOT_COMPLETE; + DEBUGOUT("Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ + msec_delay(50); + } + +out: + return status; +} + +/** + * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * @autoneg_wait_to_complete: TRUE if waiting is needed to complete + * + * Restarts link on PHY and MAC based on settings passed in. + **/ +static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + s32 status; + + DEBUGFUNC("ixgbe_setup_copper_link_82599"); + + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, + autoneg_wait_to_complete); + /* Set up MAC */ + ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete); + + return status; +} + +/** + * ixgbe_reset_hw_82599 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) +{ + ixgbe_link_speed link_speed; + s32 status; + u32 ctrl, i, autoc, autoc2; + bool link_up = FALSE; + + DEBUGFUNC("ixgbe_reset_hw_82599"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + /* PHY ops must be identified and initialized prior to reset */ + + /* Identify PHY and related function pointers */ + status = hw->phy.ops.init(hw); + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + + /* Setup SFP module if there is one present. */ + if (hw->phy.sfp_setup_needed) { + status = hw->mac.ops.setup_sfp(hw); + hw->phy.sfp_setup_needed = FALSE; + } + + if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto reset_hw_out; + + /* Reset PHY */ + if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL) + hw->phy.ops.reset(hw); + +mac_reset_top: + /* + * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. + */ + ctrl = IXGBE_CTRL_LNK_RST; + if (!hw->force_full_reset) { + hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE); + if (link_up) + ctrl = IXGBE_CTRL_RST; + } + + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + + msec_delay(50); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. 
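+ * The flag is cleared before jumping back to mac_reset_top, so at
+ * most one additional pass through the reset sequence is taken.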
+ */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* + * Store the original AUTOC/AUTOC2 values if they have not been + * stored off yet. Otherwise restore the stored original + * values since the reset operation sets back to defaults. + */ + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + if (hw->mac.orig_link_settings_stored == FALSE) { + hw->mac.orig_autoc = autoc; + hw->mac.orig_autoc2 = autoc2; + hw->mac.orig_link_settings_stored = TRUE; + } else { + if (autoc != hw->mac.orig_autoc) + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc | + IXGBE_AUTOC_AN_RESTART)); + + if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != + (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) { + autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK; + autoc2 |= (hw->mac.orig_autoc2 & + IXGBE_AUTOC2_UPPER_MASK); + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2); + } + } + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + +reset_hw_out: + return status; +} + +/** + * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) +{ + int i; + u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); + fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + + DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); + + /* + * Before starting reinitialization process, + * FDIRCMD.CMD must be zero. + */ + for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { + if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + IXGBE_FDIRCMD_CMD_MASK)) + break; + usec_delay(10); + } + if (i >= IXGBE_FDIRCMD_CMD_POLL) { + DEBUGOUT("Flow Director previous command isn't complete, " + "aborting table re-initialization. \n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); + IXGBE_WRITE_FLUSH(hw); + /* + * 82599 adapters flow director init flow cannot be restarted, + * Workaround 82599 silicon errata by performing the following steps + * before re-writing the FDIRCTRL control register with the same value. + * - write 1 to bit 8 of FDIRCMD register & + * - write 0 to bit 8 of FDIRCMD register + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | + IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & + ~IXGBE_FDIRCMD_CLEARHT)); + IXGBE_WRITE_FLUSH(hw); + /* + * Clear FDIR Hash register to clear any leftover hashes + * waiting to be programmed. 
+ */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00); + IXGBE_WRITE_FLUSH(hw); + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll init-done after we write FDIRCTRL register */ + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + usec_delay(10); + } + if (i >= IXGBE_FDIR_INIT_DONE_POLL) { + DEBUGOUT("Flow Director Signature poll time exceeded!\n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + + /* Clear FDIR statistics registers (read to clear) */ + IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT); + IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + int i; + + DEBUGFUNC("ixgbe_fdir_enable_82599"); + + /* Prime the keys for hashing */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + + if (i >= IXGBE_FDIR_INIT_DONE_POLL) + DEBUGOUT("Flow Director poll time exceeded!\n"); +} + +/** + * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + **/ +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + DEBUGFUNC("ixgbe_init_fdir_signature_82599"); + + /* + * Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially + * contains just the value of the Rx packet buffer allocation + **/ +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); + + /* + * Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor + * Initialize the drop queue + * Move the flexible bytes to use 
the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left + */ + fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH | + IXGBE_FDIRCTRL_REPORT_STATUS | + (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) | + (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) | + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + + return IXGBE_SUCCESS; +} + +/* + * These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define IXGBE_ATR_COMMON_HASH_KEY \ + (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY) +#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0); + +/** + * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash + * @stream: input bitstream to compute the hash on + * + * This function is almost identical to the function above but contains + * several optimizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys.
+ **/ +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = IXGBE_NTOHL(input.dword); + + /* generate common hash dword */ + hi_hash_dword = IXGBE_NTOHL(common.dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process remaining 30 bit of the key */ + IXGBE_COMPUTE_SIG_HASH_ITERATION(1); + IXGBE_COMPUTE_SIG_HASH_ITERATION(2); + IXGBE_COMPUTE_SIG_HASH_ITERATION(3); + IXGBE_COMPUTE_SIG_HASH_ITERATION(4); + IXGBE_COMPUTE_SIG_HASH_ITERATION(5); + IXGBE_COMPUTE_SIG_HASH_ITERATION(6); + IXGBE_COMPUTE_SIG_HASH_ITERATION(7); + IXGBE_COMPUTE_SIG_HASH_ITERATION(8); + IXGBE_COMPUTE_SIG_HASH_ITERATION(9); + IXGBE_COMPUTE_SIG_HASH_ITERATION(10); + IXGBE_COMPUTE_SIG_HASH_ITERATION(11); + IXGBE_COMPUTE_SIG_HASH_ITERATION(12); + IXGBE_COMPUTE_SIG_HASH_ITERATION(13); + IXGBE_COMPUTE_SIG_HASH_ITERATION(14); + IXGBE_COMPUTE_SIG_HASH_ITERATION(15); + + /* combine common_hash result with signature and bucket hashes */ + bucket_hash ^= common_hash; + bucket_hash &= IXGBE_ATR_HASH_MASK; + + sig_hash ^= common_hash << 16; + sig_hash &= IXGBE_ATR_HASH_MASK << 16; + + /* return completed signature hash */ + return sig_hash ^ bucket_hash; +} + +/** + * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter + * @hw: pointer to hardware structure + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to + **/ +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue) +{ + u64 fdirhashcmd; + u32 fdircmd; + + DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); + + /* + * Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + */ + switch (input.formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + case IXGBE_ATR_FLOW_TYPE_TCPV6: + case IXGBE_ATR_FLOW_TYPE_UDPV6: + case IXGBE_ATR_FLOW_TYPE_SCTPV6: + break; + default: + DEBUGOUT(" Error on flow type input\n"); + return IXGBE_ERR_CONFIG; + } + + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + + /* + * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits + * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 
+ */ + fdirhashcmd = (u64)fdircmd << 32; + fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); + IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + + DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + + return IXGBE_SUCCESS; +} + +#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0); + +/** + * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash + * @atr_input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * + * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. + **/ +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask) +{ + + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + + /* Apply masks to input data */ + input->dword_stream[0] &= input_mask->dword_stream[0]; + input->dword_stream[1] &= input_mask->dword_stream[1]; + input->dword_stream[2] &= input_mask->dword_stream[2]; + input->dword_stream[3] &= input_mask->dword_stream[3]; + input->dword_stream[4] &= input_mask->dword_stream[4]; + input->dword_stream[5] &= input_mask->dword_stream[5]; + input->dword_stream[6] &= input_mask->dword_stream[6]; + input->dword_stream[7] &= input_mask->dword_stream[7]; + input->dword_stream[8] &= input_mask->dword_stream[8]; + input->dword_stream[9] &= input_mask->dword_stream[9]; + input->dword_stream[10] &= input_mask->dword_stream[10]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); + + /* generate common hash dword */ + hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^ + input->dword_stream[2] ^ + input->dword_stream[3] ^ + input->dword_stream[4] ^ + input->dword_stream[5] ^ + input->dword_stream[6] ^ + input->dword_stream[7] ^ + input->dword_stream[8] ^ + input->dword_stream[9] ^ + input->dword_stream[10]); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + IXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process the remaining 30 bits of the key */ + IXGBE_COMPUTE_BKT_HASH_ITERATION(1); + IXGBE_COMPUTE_BKT_HASH_ITERATION(2); + IXGBE_COMPUTE_BKT_HASH_ITERATION(3); + IXGBE_COMPUTE_BKT_HASH_ITERATION(4); + IXGBE_COMPUTE_BKT_HASH_ITERATION(5); + IXGBE_COMPUTE_BKT_HASH_ITERATION(6); + IXGBE_COMPUTE_BKT_HASH_ITERATION(7); + IXGBE_COMPUTE_BKT_HASH_ITERATION(8); + IXGBE_COMPUTE_BKT_HASH_ITERATION(9); + IXGBE_COMPUTE_BKT_HASH_ITERATION(10); + IXGBE_COMPUTE_BKT_HASH_ITERATION(11); + IXGBE_COMPUTE_BKT_HASH_ITERATION(12); + IXGBE_COMPUTE_BKT_HASH_ITERATION(13); + IXGBE_COMPUTE_BKT_HASH_ITERATION(14); +
IXGBE_COMPUTE_BKT_HASH_ITERATION(15); + + /* + * Limit hash to 13 bits since max bucket count is 8K. + * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = bucket_hash & 0x1FFF; +} + +/** + * ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks + * @input_mask: mask to be bit swapped + * + * The source and destination port masks for flow director are bit swapped + * in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2, + * and so on. In order to generate a correctly swapped value we need to bit + * swap the mask and that is what is accomplished by this function. + **/ +static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) +{ + u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port); + mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; + mask |= IXGBE_NTOHS(input_mask->formatted.src_port); + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * These two macros are meant to address the fact that we have registers + * that are either all or in part big-endian. As a result on big-endian + * systems we will end up byte swapping the value to little-endian before + * it is byte swapped again and written to the hardware in the original + * big-endian format. + */ +#define IXGBE_STORE_AS_BE32(_value) \ + (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \ + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + +#define IXGBE_WRITE_REG_BE32(a, reg, value) \ + IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) + +#define IXGBE_STORE_AS_BE16(_value) \ + IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask) +{ + /* mask IPv6 since it is currently not supported */ + u32 fdirm = IXGBE_FDIRM_DIPv6; + u32 fdirtcpm; + + DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599"); + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + * + * This also assumes IPv4 only. IPv6 masking isn't supported at this + * point in time.
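+ * + * As a rough illustration of the port-mask handling below: the helper + * ixgbe_get_fdirtcpm_82599() bit-reverses each 16-bit port mask in place + * and the result is inverted before being written, so a mask of 0xFFFF + * (compare every port bit) is stored as 0x0000 in FDIRTCPM/FDIRUDPM, + * while a mask of 0x0000 (ignore the port) is stored as all ones.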
+ */ + + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) { + DEBUGOUT(" bucket hash should always be 0 in mask\n"); + } + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { + case 0x0: + fdirm |= IXGBE_FDIRM_POOL; + case 0x7F: + break; + default: + DEBUGOUT(" Error on vm pool mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) { + case 0x0: + fdirm |= IXGBE_FDIRM_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { + DEBUGOUT(" Error on src/dst port mask\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_L4TYPE_MASK: + break; + default: + DEBUGOUT(" Error on flow type mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) { + case 0x0000: + /* mask VLAN ID, fall through to mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0x0FFF: + /* mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANP; + break; + case 0xE000: + /* mask VLAN ID only, fall through */ + fdirm |= IXGBE_FDIRM_VLANID; + case 0xEFFF: + /* no VLAN fields masked */ + break; + default: + DEBUGOUT(" Error on VLAN mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch (input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: + /* Mask Flex Bytes, fall through */ + fdirm |= IXGBE_FDIRM_FLEX; + case 0xFFFF: + break; + default: + DEBUGOUT(" Error on flexible byte mask\n"); + return IXGBE_ERR_CONFIG; + } + + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); + + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); + + /* write both the same so that UDP and TCP use the same mask */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); + IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); + + /* store source and destination IP masks (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, + ~input_mask->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, + ~input_mask->formatted.dst_ip[0]); + + return IXGBE_SUCCESS; +} + +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + + DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599"); + + /* currently IPv6 is not supported, must be programmed with 0 */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), + input->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), + input->formatted.src_ip[1]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), + input->formatted.src_ip[2]); + + /* record the source address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); + + /* record the first 32 bits of the destination address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); + + /* record source and destination port (little-endian) */ + fdirport = IXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= IXGBE_NTOHS(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + + /* record vlan (little-endian) and flex_bytes (big-endian) */ + fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; + fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + + /* configure FDIRHASH register */ + fdirhash =
input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + IXGBE_WRITE_FLUSH(hw); + + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + if (queue == IXGBE_FDIR_DROP_QUEUE) + fdircmd |= IXGBE_FDIRCMD_DROP; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); + + return IXGBE_SUCCESS; +} + +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash; + u32 fdircmd = 0; + u32 retry_count; + s32 err = IXGBE_SUCCESS; + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* flush hash to HW */ + IXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + + for (retry_count = 10; retry_count; retry_count--) { + /* allow 10us for query to process */ + usec_delay(10); + /* verify query completed successfully */ + fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + break; + } + + if (!retry_count) + err = IXGBE_ERR_FDIR_REINIT_FAILED; + + /* if filter exists in hardware then remove it */ + if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + IXGBE_FDIRCMD_CMD_REMOVE_FLOW); + } + + return err; +} + +/** + * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter + * @hw: pointer to hardware structure + * @input: input bitstream + * @input_mask: mask for the input bitstream + * @soft_id: software index for the filters + * @queue: queue index to direct traffic to + * + * Note that the caller to this function must lock before calling, since the + * hardware writes must be protected from one another. 
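+ * + * A minimal calling sketch (all values purely illustrative, the caller's + * locking omitted): fill a union ixgbe_atr_input with the flow key, for + * example input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4 plus the + * addresses and ports, fill input_mask with the fields to compare, then + * call ixgbe_fdir_add_perfect_filter_82599(hw, &input, &input_mask, + * soft_id, queue) and keep soft_id so the filter can later be removed + * with ixgbe_fdir_erase_perfect_filter_82599().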
+ **/ +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *input_mask, + u16 soft_id, u8 queue) +{ + s32 err = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); + + /* + * Check flow_type formatting, and bail out before we touch the hardware + * if there's a configuration issue + */ + switch (input->formatted.flow_type) { + case IXGBE_ATR_FLOW_TYPE_IPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; + if (input->formatted.dst_port || input->formatted.src_port) { + DEBUGOUT(" Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + break; + case IXGBE_ATR_FLOW_TYPE_SCTPV4: + if (input->formatted.dst_port || input->formatted.src_port) { + DEBUGOUT(" Error on src/dst port\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + break; + default: + DEBUGOUT(" Error on flow type input\n"); + return err; + } + + /* program input mask into the HW */ + err = ixgbe_fdir_set_input_mask_82599(hw, input_mask); + if (err) + return err; + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(input, input_mask); + + /* program filters to filter memory */ + return ixgbe_fdir_write_perfect_filter_82599(hw, input, + soft_id, queue); +} + +/** + * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs read operation to Omer analog register specified. + **/ +s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + u32 core_ctl; + + DEBUGFUNC("ixgbe_read_analog_reg8_82599"); + + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | + (reg << 8)); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); + *val = (u8)core_ctl; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register + * @hw: pointer to hardware structure + * @reg: atlas register to write + * @val: value to write + * + * Performs write operation to Omer analog register specified. + **/ +s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + u32 core_ctl; + + DEBUGFUNC("ixgbe_write_analog_reg8_82599"); + + core_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); + IXGBE_WRITE_FLUSH(hw); + usec_delay(10); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_82599"); + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = ixgbe_start_hw_gen2(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = TRUE; + + if (ret_val == IXGBE_SUCCESS) + ret_val = ixgbe_verify_fw_version_82599(hw); +out: + return ret_val; +} + +/** + * ixgbe_identify_phy_82599 - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. 
+ * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. + **/ +s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + + DEBUGFUNC("ixgbe_identify_phy_82599"); + + /* Detect PHY if not unknown - returns success if already detected. */ + status = ixgbe_identify_phy_generic(hw); + if (status != IXGBE_SUCCESS) { + /* 82599 10GBASE-T requires an external PHY */ + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) + goto out; + else + status = ixgbe_identify_sfp_module_generic(hw); + } + + /* Set PHY type none if no PHY detected */ + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.type = ixgbe_phy_none; + status = IXGBE_SUCCESS; + } + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == ixgbe_phy_sfp_unsupported) + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + +out: + return status; +} + +/** + * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + u8 comp_codes_10g = 0; + u8 comp_codes_1g = 0; + + DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: + break; + } + + switch (autoc & IXGBE_AUTOC_LMS_MASK) { + case IXGBE_AUTOC_LMS_1G_AN: + case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: + if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) { + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX | + IXGBE_PHYSICAL_LAYER_1000BASE_BX; + goto out; + } else + /* SFI mode so read SFP module */ + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: + if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI; + goto out; + break; + case IXGBE_AUTOC_LMS_10G_SERIAL: + if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) { + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR; + goto out; + } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) + goto sfp_check; + break; + case IXGBE_AUTOC_LMS_KX4_KX_KR: + case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: + if (autoc & IXGBE_AUTOC_KX_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX; + if (autoc & IXGBE_AUTOC_KX4_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4; + if (autoc & IXGBE_AUTOC_KR_SUPP) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR; + 
goto out; + break; + default: + goto out; + break; + } + +sfp_check: + /* SFP check must be done last since DA modules are sometimes used to + * test KR mode - we need to id KR mode correctly before SFP module. + * Call identify_sfp because the pluggable module may have changed */ + hw->phy.ops.identify_sfp(hw); + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + goto out; + + switch (hw->phy.type) { + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; + break; + case ixgbe_phy_sfp_ftl_active: + case ixgbe_phy_sfp_active_unknown: + physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; + break; + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); + if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; + else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) + physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; + break; + default: + break; + } + +out: + return physical_layer; +} + +/** + * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599 + * @hw: pointer to hardware structure + * @regval: register value to write to RXCTRL + * + * Enables the Rx DMA unit for 82599 + **/ +s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) +{ +#define IXGBE_MAX_SECRX_POLL 30 + int i; + int secrxreg; + + DEBUGFUNC("ixgbe_enable_rx_dma_82599"); + + /* + * Workaround for 82599 silicon errata when enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. + */ + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg |= IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) { + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT); + if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY) + break; + else + /* Use interrupt-safe sleep just in case */ + usec_delay(10); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECRX_POLL) + DEBUGOUT("Rx unit being enabled before security " + "path fully disabled. Continuing with init.\n"); + + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_verify_fw_version_82599 - verify fw version for 82599 + * @hw: pointer to hardware structure + * + * Verifies that installed the firmware version is 0.6 or higher + * for SFI devices. All 82599 SFI devices should have version 0.6 or higher. + * + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or + * if the FW version is not supported. 
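+ * + * In outline, the check below walks EEPROM pointers: the word at + * IXGBE_FW_PTR locates the Firmware Module block, its + * IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR word locates the Pass Through + * Patch Configuration block, and that block's IXGBE_FW_PATCH_VERSION_4 + * word must be greater than 0x5; a pointer of 0 or 0xFFFF at any step is + * treated as firmware not present.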
+ **/ +static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM_VERSION; + u16 fw_offset, fw_ptp_cfg_offset; + u16 fw_version = 0; + + DEBUGFUNC("ixgbe_verify_fw_version_82599"); + + /* firmware check is only necessary for SFI devices */ + if (hw->phy.media_type != ixgbe_media_type_fiber) { + status = IXGBE_SUCCESS; + goto fw_version_out; + } + + /* get the offset to the Firmware Module block */ + hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + + if ((fw_offset == 0) || (fw_offset == 0xFFFF)) + goto fw_version_out; + + /* get the offset to the Pass Through Patch Configuration block */ + hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), + &fw_ptp_cfg_offset); + + if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) + goto fw_version_out; + + /* get the firmware version */ + hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + + IXGBE_FW_PATCH_VERSION_4), + &fw_version); + + if (fw_version > 0x5) + status = IXGBE_SUCCESS; + +fw_version_out: + return status; +} + +/** + * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. + * @hw: pointer to hardware structure + * + * Returns TRUE if the LESM FW module is present and enabled. Otherwise + * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled. + **/ +bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) +{ + bool lesm_enabled = FALSE; + u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; + s32 status; + + DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); + + /* get the offset to the Firmware Module block */ + status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + + if ((status != IXGBE_SUCCESS) || + (fw_offset == 0) || (fw_offset == 0xFFFF)) + goto out; + + /* get the offset to the LESM Parameters block */ + status = hw->eeprom.ops.read(hw, (fw_offset + + IXGBE_FW_LESM_PARAMETERS_PTR), + &fw_lesm_param_offset); + + if ((status != IXGBE_SUCCESS) || + (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) + goto out; + + /* get the lesm state word */ + status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + + IXGBE_FW_LESM_STATE_1), + &fw_lesm_state); + + if ((status == IXGBE_SUCCESS) && + (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) + lesm_enabled = TRUE; + +out: + return lesm_enabled; +} + +/** + * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Retrieves 16 bit word(s) read from EEPROM + **/ +static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, + data); + else + ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, + words, + data); + + return ret_val; +} + +/** + * ixgbe_read_eeprom_82599 - Read EEPROM word using + * fastest available method + * + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM + **/ +static s32 
ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + + DEBUGFUNC("ixgbe_read_eeprom_82599"); + + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang + */ + if ((eeprom->type == ixgbe_eeprom_spi) && + (offset <= IXGBE_EERD_MAX_ADDR)) + ret_val = ixgbe_read_eerd_generic(hw, offset, data); + else + ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); + + return ret_val; +} diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c new file mode 100644 index 0000000000..cdee623850 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c @@ -0,0 +1,1130 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "ixgbe_api.h" +#include "ixgbe_common.h" + +extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw); +extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw); + +/** + * ixgbe_init_shared_code - Initialize the shared code + * @hw: pointer to hardware structure + * + * This will assign function pointers and assign the MAC type and PHY code. + * Does not touch the hardware. This function must be called prior to any + * other function in the shared code. The ixgbe_hw structure should be + * memset to 0 prior to calling this function. 
The following fields in + * hw structure should be filled in prior to calling this function: + * hw_addr, back, device_id, vendor_id, subsystem_device_id, + * subsystem_vendor_id, and revision_id + **/ +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_init_shared_code"); + + /* + * Set the mac type + */ + ixgbe_set_mac_type(hw); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + status = ixgbe_init_ops_82598(hw); + break; + case ixgbe_mac_82599EB: + status = ixgbe_init_ops_82599(hw); + break; + case ixgbe_mac_82599_vf: + case ixgbe_mac_X540_vf: + status = ixgbe_init_ops_vf(hw); + break; + case ixgbe_mac_X540: + status = ixgbe_init_ops_X540(hw); + break; + default: + status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + break; + } + + return status; +} + +/** + * ixgbe_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * vendor ID and device ID stored in the hw structure. + **/ +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_mac_type\n"); + + if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) { + switch (hw->device_id) { + case IXGBE_DEV_ID_82598: + case IXGBE_DEV_ID_82598_BX: + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: + case IXGBE_DEV_ID_82598AF_DUAL_PORT: + case IXGBE_DEV_ID_82598AT: + case IXGBE_DEV_ID_82598AT2: + case IXGBE_DEV_ID_82598EB_CX4: + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: + case IXGBE_DEV_ID_82598_DA_DUAL_PORT: + case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: + case IXGBE_DEV_ID_82598EB_XF_LR: + case IXGBE_DEV_ID_82598EB_SFP_LOM: + hw->mac.type = ixgbe_mac_82598EB; + break; + case IXGBE_DEV_ID_82599_KX4: + case IXGBE_DEV_ID_82599_KX4_MEZZ: + case IXGBE_DEV_ID_82599_XAUI_LOM: + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + case IXGBE_DEV_ID_82599_KR: + case IXGBE_DEV_ID_82599_SFP: + case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: + case IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599EN_SFP: + case IXGBE_DEV_ID_82599_CX4: + case IXGBE_DEV_ID_82599_T3_LOM: + hw->mac.type = ixgbe_mac_82599EB; + break; + case IXGBE_DEV_ID_82599_VF: + hw->mac.type = ixgbe_mac_82599_vf; + break; + case IXGBE_DEV_ID_X540_VF: + hw->mac.type = ixgbe_mac_X540_vf; + break; + case IXGBE_DEV_ID_X540T: + hw->mac.type = ixgbe_mac_X540; + break; + default: + ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + break; + } + } else { + ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; + } + + DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n", + hw->mac.type, ret_val); + return ret_val; +} + +/** + * ixgbe_init_hw - Initialize the hardware + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting and then starting the hardware + **/ +s32 ixgbe_init_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_reset_hw - Performs a hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks and + * clears all interrupts, performs a PHY reset, and performs a MAC reset + **/ +s32 ixgbe_reset_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_start_hw - Prepares hardware for Rx/Tx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, + * clears all on chip counters, initializes receive address registers, + * multicast table, VLAN filter 
table, calls routine to setup link and + * flow control settings, and leaves transmit and receive units disabled + * and uninitialized. + **/ +s32 ixgbe_start_hw(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_relaxed_ordering - Enables tx relaxed ordering, + * which is disabled by default in ixgbe_start_hw(); + * + * @hw: pointer to hardware structure + * + * Enable relaxed ordering; + **/ +void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_relaxed_ordering) + hw->mac.ops.enable_relaxed_ordering(hw); +} + +/** + * ixgbe_clear_hw_cntrs - Clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. + **/ +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_media_type - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), + ixgbe_media_type_unknown); +} + +/** + * ixgbe_get_mac_addr - Get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from the first Receive Address Register + * (RAR0) A reset of the adapter must have been performed prior to calling + * this function in order for the MAC address to have been loaded from the + * EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, + (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_san_mac_addr - Get SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + **/ +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_san_mac_addr - Write a SAN MAC address + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Writes A SAN MAC address to the EEPROM. + **/ +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr, + (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_device_caps - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word for device capabilities + * + * Reads the extra device capabilities from the EEPROM + **/ +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_device_caps, + (hw, device_caps), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. 
+ **/ +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix, + (hw, wwnn_prefix, wwpn_prefix), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status, + (hw, bs), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_bus_info - Set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_of_tx_queues - Get Tx queues + * @hw: pointer to hardware structure + * + * Returns the number of transmit queues for the given adapter. + **/ +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_tx_queues; +} + +/** + * ixgbe_get_num_of_rx_queues - Get Rx queues + * @hw: pointer to hardware structure + * + * Returns the number of receive queues for the given adapter. + **/ +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) +{ + return hw->mac.max_rx_queues; +} + +/** + * ixgbe_stop_adapter - Disable Rx/Tx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_pba_string - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. + **/ +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * ixgbe_read_pba_num - Reads part number from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number from the EEPROM + * + * Reads the part number from the EEPROM. + **/ +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num) +{ + return ixgbe_read_pba_num_generic(hw, pba_num); +} + +/** + * ixgbe_identify_phy - Get PHY type + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. 
+ **/ +s32 ixgbe_identify_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw), + IXGBE_NOT_IMPLEMENTED); + } + + return status; +} + +/** + * ixgbe_reset_phy - Perform a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + + if (hw->phy.type == ixgbe_phy_unknown) { + if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) + status = IXGBE_ERR_PHY; + } + + if (status == IXGBE_SUCCESS) { + status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), + IXGBE_NOT_IMPLEMENTED); + } + return status; +} + +/** + * ixgbe_get_phy_firmware_version - + * @hw: pointer to hardware structure + * @firmware_version: pointer to firmware version + **/ +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) +{ + s32 status = IXGBE_SUCCESS; + + status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, + (hw, firmware_version), + IXGBE_NOT_IMPLEMENTED); + return status; +} + +/** + * ixgbe_read_phy_reg - Read PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register + **/ +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_phy_reg - Write PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @phy_data: Data to write to the PHY register + * + * Writes a value to specified PHY register + **/ +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data) +{ + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + + return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, + device_type, phy_data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link - Restart PHY autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. + **/ +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_check_phy_link - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads a PHY register to determine if link is up and the current speed for + * the PHY. 
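+ * + * Usage sketch (variable names are illustrative): declare ixgbe_link_speed + * speed and bool link_up, then call ixgbe_check_phy_link(hw, &speed, + * &link_up); on success both output parameters are filled in.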
+ **/ +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, + link_up), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_setup_phy_link_speed - Set auto advertise + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * + * Sets the auto advertised capabilities + **/ +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, + autoneg, autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_check_link - Get link and speed status + * @hw: pointer to hardware structure + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, + link_up, link_up_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_tx_laser - Disable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to disable the laser on SFI optics. + **/ +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); +} + +/** + * ixgbe_enable_tx_laser - Enable Tx laser + * @hw: pointer to hardware structure + * + * If the driver needs to enable the laser on SFI optics. + **/ +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); +} + +/** + * ixgbe_flap_tx_laser - flap Tx laser to start autotry process + * @hw: pointer to hardware structure + * + * When the driver changes the link speeds that it can support then + * flap the tx laser to alert the link partner to start autotry + * process on its end. + **/ +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.flap_tx_laser) + hw->mac.ops.flap_tx_laser(hw); +} + +/** + * ixgbe_setup_link - Set link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * + * Configures link settings. Restarts the link. + * Performs autonegotiation if needed. + **/ +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed, + autoneg, autoneg_wait_to_complete), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_link_capabilities - Returns link capabilities + * @hw: pointer to hardware structure + * + * Determines the link capabilities of the current configuration. + **/ +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, + speed, autoneg), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_on - Turn on LEDs + * @hw: pointer to hardware structure + * @index: led number to turn on + * + * Turns on the software controllable LEDs. + **/ +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_led_off - Turn off LEDs + * @hw: pointer to hardware structure + * @index: led number to turn off + * + * Turns off the software controllable LEDs. 
+ **/ +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_start - Blink LEDs + * @hw: pointer to hardware structure + * @index: led number to blink + * + * Blink LED based on index. + **/ +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_blink_led_stop - Stop blinking LEDs + * @hw: pointer to hardware structure + * + * Stop blinking LED based on index. + **/ +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_eeprom_params - Initialize EEPROM parameters + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), + IXGBE_NOT_IMPLEMENTED); +} + + +/** + * ixgbe_write_eeprom - Write word to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. + **/ +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word(s) to be written to the EEPROM + * @words: number of words + * + * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not + * called after this function, the EEPROM will most likely contain an + * invalid checksum. 
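+ * + * A typical sequence (sketch): write the word(s) with this function, then + * call ixgbe_update_eeprom_checksum(hw) so the stored checksum matches + * the new EEPROM contents.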
+ **/ +s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer, + (hw, offset, words, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_eeprom - Read word from EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM + **/ +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit word(s) from EEPROM + * @words: number of words + * + * Reads 16 bit word(s) from EEPROM + **/ +s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer, + (hw, offset, words, data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum + **/ +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum, + (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_insert_mac_addr - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is already in; adds to the pool list + **/ +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr, + (hw, addr, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_rar - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. + **/ +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq, + enable_addr), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_rar - Clear Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register.
+ **/ +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vmdq - Associate a VMDq index with a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to associate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address + * @hw: pointer to hardware structure + * @rar: receive address register index to disassociate with VMDq index + * @vmdq: VMDq set or pool index + **/ +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_rx_addrs - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_num_rx_addrs - Returns the number of RAR entries. + * @hw: pointer to hardware structure + **/ +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) +{ + return hw->mac.num_rar_entries; +} + +/** + * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new multicast addresses + * @addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + **/ +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, + addr_list, addr_count, func), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @func: iterator function to walk the multicast address list + * + * The given list replaces any existing list. Clears the MC addrs from receive + * address registers and the multicast table. Uses unused receive address + * registers for the first multicast addresses, and hashes the rest into the + * multicast table. + **/ +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr func, + bool clear) +{ + return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, + mc_addr_list, mc_addr_count, func, clear), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_enable_mc - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. 
+ **/ +s32 ixgbe_enable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_disable_mc - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_clear_vfta - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_vfta - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, + vlan_on), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_fc_enable - Enable flow control + * @hw: pointer to hardware structure + * @packetbuf_num: packet buffer number (0-7) + * + * Configures the flow control settings based on SW configuration. + **/ +s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw, packetbuf_num), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_set_fw_drv_ver - Try to send the driver version number FW + * @hw: pointer to hardware structure + * @maj: driver major number to be sent to firmware + * @min: driver minor number to be sent to firmware + * @build: driver build number to be sent to firmware + * @ver: driver version number to be sent to firmware + **/ +s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, + u8 ver) +{ + return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min, + build, ver), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_analog_reg8 - Reads 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to read + * @val: read value + * + * Performs write operation to analog register specified. + **/ +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) +{ + return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_analog_reg8 - Writes 8 bit analog register + * @hw: pointer to hardware structure + * @reg: analog register to write + * @val: value to write + * + * Performs write operation to Atlas analog register specified. + **/ +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) +{ + return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, + val), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_init_uta_tables - Initializes Unicast Table Arrays. + * @hw: pointer to hardware structure + * + * Initializes the Unicast Table Arrays to zero on device load. This + * is part of the Rx init addr execution path. 
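+ *
+ * For the ixgbe_set_vfta() wrapper above, an illustrative caller
+ * sketch (the VLAN id 100 and VMDq index 0 are assumptions of the
+ * example); the VLAN is admitted into the filter table and removed
+ * again later:
+ *
+ *	ixgbe_set_vfta(hw, 100, 0, TRUE);
+ *	...
+ *	ixgbe_set_vfta(hw, 100, 0, FALSE);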
+ **/ +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_byte - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface + * at a specified device address. + **/ +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset, + dev_addr, data), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, + u8 byte_offset, u8 eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) +{ + return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom, + (hw, byte_offset, eeprom_data), + IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_get_supported_physical_layer - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw) +{ + return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer, + (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN); +} + +/** + * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependant on device specifics + * @hw: pointer to hardware structure + * @regval: bitfield to write to the Rx DMA register + * + * Enables the Rx DMA unit of the device. 
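+ *
+ * For the SFP I2C wrappers above, an illustrative caller sketch
+ * (assuming byte offset 0 of the module EEPROM holds the SFF
+ * identifier byte):
+ *
+ *	u8 identifier;
+ *
+ *	if (ixgbe_read_i2c_eeprom(hw, 0x0, &identifier) == IXGBE_SUCCESS)
+ *		... identifier tells the module type ...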
+ **/ +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval) +{ + return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma, + (hw, regval), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask) +{ + return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, + (hw, mask), IXGBE_NOT_IMPLEMENTED); +} + +/** + * ixgbe_release_swfw_semaphore - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through SW_FW_SYNC register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask) +{ + if (hw->mac.ops.release_swfw_sync) + hw->mac.ops.release_swfw_sync(hw, mask); +} + diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h new file mode 100644 index 0000000000..c41dd36cf1 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h @@ -0,0 +1,168 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXGBE_API_H_ +#define _IXGBE_API_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_init_shared_code(struct ixgbe_hw *hw); + +s32 ixgbe_set_mac_type(struct ixgbe_hw *hw); +s32 ixgbe_init_hw(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw(struct ixgbe_hw *hw); +s32 ixgbe_start_hw(struct ixgbe_hw *hw); +void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); +enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num); +s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); + +s32 ixgbe_identify_phy(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 *phy_data); +s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + u16 phy_data); + +s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); +s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); +void ixgbe_disable_tx_laser(struct ixgbe_hw *hw); +void ixgbe_enable_tx_laser(struct ixgbe_hw *hw); +void ixgbe_flap_tx_laser(struct ixgbe_hw *hw); +s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg, bool autoneg_wait_to_complete); +s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); +s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *autoneg); +s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); + +s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw); + +s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); +u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw); +s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr func, + bool clear); +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq); +s32 ixgbe_enable_mc(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc(struct ixgbe_hw 
*hw); +s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); + +s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num); +s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, + u8 ver); +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); +s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); +s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); +u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval); +s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); +s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common, + u8 queue); +s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input_mask); +s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id, u8 queue); +s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + u16 soft_id); +s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, + union ixgbe_atr_input *mask, + u16 soft_id, + u8 queue); +void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + union ixgbe_atr_input *mask); +u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + union ixgbe_atr_hash_dword common); +s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 *data); +s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, + u8 data); +s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); +s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); +s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask); +void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); +s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs); + + +#endif /* _IXGBE_API_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c new file mode 100644 index 0000000000..e612f6a8b8 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c @@ -0,0 +1,4049 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "ixgbe_common.h" +#include "ixgbe_phy.h" +#include "ixgbe_api.h" + +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count); +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +static void ixgbe_release_eeprom(struct ixgbe_hw *hw); + +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset); +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw); +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw); +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw); +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); +static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset); + + +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan); + +/** + * ixgbe_init_ops_generic - Inits function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. 
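+ *
+ * These pointers are what the top-level ixgbe_*() wrappers in
+ * ixgbe_api.c dispatch through via ixgbe_call_func(). A minimal
+ * bring-up sketch, assuming the caller handles errors:
+ *
+ *	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS)
+ *		return -1;
+ *	if (ixgbe_init_hw(hw) != IXGBE_SUCCESS)
+ *		return -1;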
+ **/ +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + struct ixgbe_mac_info *mac = &hw->mac; + u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + DEBUGFUNC("ixgbe_init_ops_generic"); + + /* EEPROM */ + eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic; + /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ + if (eec & IXGBE_EEC_PRES) { + eeprom->ops.read = &ixgbe_read_eerd_generic; + eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic; + } else { + eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic; + eeprom->ops.read_buffer = + &ixgbe_read_eeprom_buffer_bit_bang_generic; + } + eeprom->ops.write = &ixgbe_write_eeprom_generic; + eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic; + eeprom->ops.validate_checksum = + &ixgbe_validate_eeprom_checksum_generic; + eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic; + eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic; + + /* MAC */ + mac->ops.init_hw = &ixgbe_init_hw_generic; + mac->ops.reset_hw = NULL; + mac->ops.start_hw = &ixgbe_start_hw_generic; + mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic; + mac->ops.get_media_type = NULL; + mac->ops.get_supported_physical_layer = NULL; + mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic; + mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic; + mac->ops.stop_adapter = &ixgbe_stop_adapter_generic; + mac->ops.get_bus_info = &ixgbe_get_bus_info_generic; + mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie; + mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync; + mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync; + + /* LEDs */ + mac->ops.led_on = &ixgbe_led_on_generic; + mac->ops.led_off = &ixgbe_led_off_generic; + mac->ops.blink_led_start = &ixgbe_blink_led_start_generic; + mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic; + + /* RAR, Multicast, VLAN */ + mac->ops.set_rar = &ixgbe_set_rar_generic; + mac->ops.clear_rar = &ixgbe_clear_rar_generic; + mac->ops.insert_mac_addr = NULL; + mac->ops.set_vmdq = NULL; + mac->ops.clear_vmdq = NULL; + mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic; + mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic; + mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic; + mac->ops.enable_mc = &ixgbe_enable_mc_generic; + mac->ops.disable_mc = &ixgbe_disable_mc_generic; + mac->ops.clear_vfta = NULL; + mac->ops.set_vfta = NULL; + mac->ops.init_uta_tables = NULL; + + /* Flow Control */ + mac->ops.fc_enable = &ixgbe_fc_enable_generic; + + /* Link */ + mac->ops.get_link_capabilities = NULL; + mac->ops.setup_link = NULL; + mac->ops.check_link = NULL; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) +{ + u32 ctrl_ext; + + DEBUGFUNC("ixgbe_start_hw_generic"); + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + + /* PHY ops initialization must be done in reset_hw() */ + + /* Clear the VLAN filter table */ + hw->mac.ops.clear_vfta(hw); + + /* Clear statistics registers */ + hw->mac.ops.clear_hw_cntrs(hw); + + /* Set No 
Snoop Disable */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + IXGBE_WRITE_FLUSH(hw); + + /* Setup flow control */ + ixgbe_setup_fc(hw, 0); + + /* Clear adapter stopped flag */ + hw->adapter_stopped = FALSE; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_start_hw_gen2 - Init sequence for common device family + * @hw: pointer to hw structure + * + * Performs the init sequence common to the second generation + * of 10 GbE devices. + * Devices in the second generation: + * 82599 + * X540 + **/ +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) +{ + u32 i; + u32 regval; + + /* Clear the rate limiters */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i); + IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0); + } + IXGBE_WRITE_FLUSH(hw); + + /* Disable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN | + IXGBE_DCA_RXCTRL_DESC_HSRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_hw_generic - Generic hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware, filling the bus info + * structure and media type, clears all on chip counters, initializes receive + * address registers, multicast table, VLAN filter table, calls routine to set + * up link and flow control settings, and leaves transmit and receive units + * disabled and uninitialized + **/ +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) +{ + s32 status; + + DEBUGFUNC("ixgbe_init_hw_generic"); + + /* Reset the hardware */ + status = hw->mac.ops.reset_hw(hw); + + if (status == IXGBE_SUCCESS) { + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + } + + return status; +} + +/** + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters + * @hw: pointer to hardware structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
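+ *
+ * Because the counters are clear on read, a driver that wants running
+ * totals typically folds each read into software statistics, for
+ * example (a sketch; the stats structure is an assumption):
+ *
+ *	stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+ *	stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);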
+ **/ +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) +{ + u16 i = 0; + + DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); + + IXGBE_READ_REG(hw, IXGBE_CRCERRS); + IXGBE_READ_REG(hw, IXGBE_ILLERRC); + IXGBE_READ_REG(hw, IXGBE_ERRBC); + IXGBE_READ_REG(hw, IXGBE_MSPDC); + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_MPC(i)); + + IXGBE_READ_REG(hw, IXGBE_MLFC); + IXGBE_READ_REG(hw, IXGBE_MRFC); + IXGBE_READ_REG(hw, IXGBE_RLEC); + IXGBE_READ_REG(hw, IXGBE_LXONTXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + IXGBE_READ_REG(hw, IXGBE_LXONRXC); + IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + } + + for (i = 0; i < 8; i++) { + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + } + } + if (hw->mac.type >= ixgbe_mac_82599EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + IXGBE_READ_REG(hw, IXGBE_PRC64); + IXGBE_READ_REG(hw, IXGBE_PRC127); + IXGBE_READ_REG(hw, IXGBE_PRC255); + IXGBE_READ_REG(hw, IXGBE_PRC511); + IXGBE_READ_REG(hw, IXGBE_PRC1023); + IXGBE_READ_REG(hw, IXGBE_PRC1522); + IXGBE_READ_REG(hw, IXGBE_GPRC); + IXGBE_READ_REG(hw, IXGBE_BPRC); + IXGBE_READ_REG(hw, IXGBE_MPRC); + IXGBE_READ_REG(hw, IXGBE_GPTC); + IXGBE_READ_REG(hw, IXGBE_GORCL); + IXGBE_READ_REG(hw, IXGBE_GORCH); + IXGBE_READ_REG(hw, IXGBE_GOTCL); + IXGBE_READ_REG(hw, IXGBE_GOTCH); + if (hw->mac.type == ixgbe_mac_82598EB) + for (i = 0; i < 8; i++) + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + IXGBE_READ_REG(hw, IXGBE_RUC); + IXGBE_READ_REG(hw, IXGBE_RFC); + IXGBE_READ_REG(hw, IXGBE_ROC); + IXGBE_READ_REG(hw, IXGBE_RJC); + IXGBE_READ_REG(hw, IXGBE_MNGPRC); + IXGBE_READ_REG(hw, IXGBE_MNGPDC); + IXGBE_READ_REG(hw, IXGBE_MNGPTC); + IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); + IXGBE_READ_REG(hw, IXGBE_TPR); + IXGBE_READ_REG(hw, IXGBE_TPT); + IXGBE_READ_REG(hw, IXGBE_PTC64); + IXGBE_READ_REG(hw, IXGBE_PTC127); + IXGBE_READ_REG(hw, IXGBE_PTC255); + IXGBE_READ_REG(hw, IXGBE_PTC511); + IXGBE_READ_REG(hw, IXGBE_PTC1023); + IXGBE_READ_REG(hw, IXGBE_PTC1522); + IXGBE_READ_REG(hw, IXGBE_MPTC); + IXGBE_READ_REG(hw, IXGBE_BPTC); + for (i = 0; i < 16; i++) { + IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + if (hw->mac.type >= ixgbe_mac_82599EB) { + IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); + IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + } else { + IXGBE_READ_REG(hw, IXGBE_QBRC(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC(i)); + } + } + + if (hw->mac.type == ixgbe_mac_X540) { + if (hw->phy.id == 0) + ixgbe_identify_phy(hw); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i); + hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_pba_string_generic - Reads part number string from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number string from the EEPROM + * @pba_num_size: part number string buffer length + * + * Reads the part number string from the EEPROM. 
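+ *
+ * An illustrative caller sketch: legacy-format part numbers decode to
+ * ten characters plus a terminator, so an 11 byte buffer is the
+ * documented minimum for that case (string-format part numbers may
+ * need more):
+ *
+ *	u8 pba[11];
+ *
+ *	if (ixgbe_read_pba_string(hw, pba, sizeof(pba)) == IXGBE_SUCCESS)
+ *		... pba holds the NUL-terminated part number ...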
+ **/ +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("ixgbe_read_pba_string_generic"); + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* + * if data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (data != IXGBE_PBANUM_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { + DEBUGOUT("PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (data >> 12) & 0xF; + pba_num[1] = (data >> 8) & 0xF; + pba_num[2] = (data >> 4) & 0xF; + pba_num[3] = data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return IXGBE_SUCCESS; + } + + ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return IXGBE_ERR_PBA_SECTION; + } + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); + pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_pba_num_generic - Reads part number from EEPROM + * @hw: pointer to hardware structure + * @pba_num: stores the part number from the EEPROM + * + * Reads the part number from the EEPROM. 
+ **/ +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) +{ + s32 ret_val; + u16 data; + + DEBUGFUNC("ixgbe_read_pba_num_generic"); + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } else if (data == IXGBE_PBANUM_PTR_GUARD) { + DEBUGOUT("NVM Not supported\n"); + return IXGBE_NOT_IMPLEMENTED; + } + *pba_num = (u32)(data << 16); + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + *pba_num |= data; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_mac_addr_generic - Generic get MAC address + * @hw: pointer to hardware structure + * @mac_addr: Adapter MAC address + * + * Reads the adapter's MAC address from first Receive Address Register (RAR0) + * A reset of the adapter must be performed prior to calling this function + * in order for the MAC address to have been loaded from the EEPROM into RAR0 + **/ +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + DEBUGFUNC("ixgbe_get_mac_addr_generic"); + + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + + for (i = 0; i < 4; i++) + mac_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < 2; i++) + mac_addr[i+4] = (u8)(rar_high >> (i*8)); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * + * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure + **/ +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u16 link_status; + + DEBUGFUNC("ixgbe_get_bus_info_generic"); + + hw->bus.type = ixgbe_bus_type_pci_express; + + /* Get the negotiated link width and speed from PCI config space */ + link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); + + switch (link_status & IXGBE_PCI_LINK_WIDTH) { + case IXGBE_PCI_LINK_WIDTH_1: + hw->bus.width = ixgbe_bus_width_pcie_x1; + break; + case IXGBE_PCI_LINK_WIDTH_2: + hw->bus.width = ixgbe_bus_width_pcie_x2; + break; + case IXGBE_PCI_LINK_WIDTH_4: + hw->bus.width = ixgbe_bus_width_pcie_x4; + break; + case IXGBE_PCI_LINK_WIDTH_8: + hw->bus.width = ixgbe_bus_width_pcie_x8; + break; + default: + hw->bus.width = ixgbe_bus_width_unknown; + break; + } + + switch (link_status & IXGBE_PCI_LINK_SPEED) { + case IXGBE_PCI_LINK_SPEED_2500: + hw->bus.speed = ixgbe_bus_speed_2500; + break; + case IXGBE_PCI_LINK_SPEED_5000: + hw->bus.speed = ixgbe_bus_speed_5000; + break; + default: + hw->bus.speed = ixgbe_bus_speed_unknown; + break; + } + + mac->ops.set_lan_id(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. 
+ **/ +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) +{ + struct ixgbe_bus_info *bus = &hw->bus; + u32 reg; + + DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); + + reg = IXGBE_READ_REG(hw, IXGBE_STATUS); + bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; + bus->lan_id = bus->func; + + /* check for a port swap */ + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); + if (reg & IXGBE_FACTPS_LFS) + bus->func ^= 0x1; +} + +/** + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) +{ + u32 reg_val; + u16 i; + + DEBUGFUNC("ixgbe_stop_adapter_generic"); + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = TRUE; + + /* Disable the receive unit */ + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0); + + /* Clear interrupt mask to stop interrupts from being generated */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts, flush previous writes */ + IXGBE_READ_REG(hw, IXGBE_EICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + reg_val &= ~IXGBE_RXDCTL_ENABLE; + reg_val |= IXGBE_RXDCTL_SWFLSH; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); + } + + /* flush all queues disables */ + IXGBE_WRITE_FLUSH(hw); + msec_delay(2); + + /* + * Prevent the PCI-E bus from from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return ixgbe_disable_pcie_master(hw); +} + +/** + * ixgbe_led_on_generic - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on + **/ +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + DEBUGFUNC("ixgbe_led_on_generic"); + + /* To turn on the LED, set mode to ON. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_led_off_generic - Turns off the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn off + **/ +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + DEBUGFUNC("ixgbe_led_off_generic"); + + /* To turn off the LED, set mode to OFF. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
+ **/ +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_generic"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->type = ixgbe_eeprom_none; + /* Set default semaphore delay to 10ms which is a well + * tested value */ + eeprom->semaphore_delay = 10; + /* Clear EEPROM page size, it will be initialized as needed */ + eeprom->word_page_size = 0; + + /* + * Check for EEPROM present first. + * If not present leave as none + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (eec & IXGBE_EEC_PRES) { + eeprom->type = ixgbe_eeprom_spi; + + /* + * SPI EEPROM is assumed here. This code would need to + * change if a future EEPROM is not SPI. + */ + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + } + + if (eec & IXGBE_EEC_ADDR_SIZE) + eeprom->address_bits = 16; + else + eeprom->address_bits = 8; + DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " + "%d\n", eeprom->type, eeprom->word_size, + eeprom->address_bits); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to write + * @words: number of word(s) + * @data: 16 bit word(s) to write to EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u16 i, count; + + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + /* + * The EEPROM page size cannot be queried from the chip. We do lazy + * initialization. It is worth to do that when we write large buffer. + */ + if ((hw->eeprom.word_page_size == 0) && + (words > IXGBE_EEPROM_PAGE_SIZE_MAX)) + ixgbe_detect_eeprom_page_size_generic(hw, offset); + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? + IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != IXGBE_SUCCESS) + break; + } + +out: + return status; +} + +/** + * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @words: number of word(s) + * @data: 16 bit word(s) to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. 
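+ *
+ * An illustrative caller sketch using the public wrappers (the offset
+ * 0x2F and the payload are assumptions of the example); the checksum
+ * update keeps the image valid after the write:
+ *
+ *	u16 words[2] = {0x1234, 0x5678};
+ *
+ *	if (ixgbe_write_eeprom_buffer(hw, 0x2F, 2, words) == IXGBE_SUCCESS)
+ *		ixgbe_update_eeprom_checksum(hw);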
+ **/ +static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word; + u16 page_size; + u16 i; + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + + DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); + + /* Prepare the EEPROM for writing */ + status = ixgbe_acquire_eeprom(hw); + + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + ixgbe_shift_out_eeprom_bits(hw, + IXGBE_EEPROM_WREN_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + + ixgbe_standby_eeprom(hw); + + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, write_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + page_size = hw->eeprom.word_page_size; + + /* Send the data in burst via SPI*/ + do { + word = data[i]; + word = (word >> 8) | (word << 8); + ixgbe_shift_out_eeprom_bits(hw, word, 16); + + if (page_size == 0) + break; + + /* do not wrap around page */ + if (((offset + i) & (page_size - 1)) == + (page_size - 1)) + break; + } while (++i < words); + + ixgbe_standby_eeprom(hw); + msec_delay(10); + } + /* Done with writing - release the EEPROM */ + ixgbe_release_eeprom(hw); + } + + return status; +} + +/** + * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be written to + * @data: 16 bit word to be written to the EEPROM + * + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status; + + DEBUGFUNC("ixgbe_write_eeprom_generic"); + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data); + +out: + return status; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit words(s) from EEPROM + * @words: number of word(s) + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + u16 i, count; + + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset + words > hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + /* + * We cannot hold synchronization semaphores for too long + * to avoid other entity starvation. However it is more efficient + * to read in bursts than synchronizing access for each word. + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + + if (status != IXGBE_SUCCESS) + break; + } + +out: + return status; +} + +/** + * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @words: number of word(s) + * @data: read 16 bit word(s) from EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + s32 status; + u16 word_in; + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + u16 i; + + DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); + + /* Prepare the EEPROM for reading */ + status = ixgbe_acquire_eeprom(hw); + + if (status == IXGBE_SUCCESS) { + if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + + if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + /* + * Some SPI eeproms use the 8th address bit embedded + * in the opcode + */ + if ((hw->eeprom.address_bits == 8) && + ((offset + i) >= 128)) + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + ixgbe_shift_out_eeprom_bits(hw, read_opcode, + IXGBE_EEPROM_OPCODE_BITS); + ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2), + hw->eeprom.address_bits); + + /* Read the data. */ + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + + /* End this read operation */ + ixgbe_release_eeprom(hw); + } + + return status; +} + +/** + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read + * @data: read 16 bit value from EEPROM + * + * Reads 16 bit value from EEPROM through bit-bang method + **/ +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data) +{ + s32 status; + + DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); + + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + +out: + return status; +} + +/** + * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of word(s) + * @data: 16 bit word(s) from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. 
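+ *
+ * Whether this EERD path or the bit-bang path above is taken is decided
+ * once in ixgbe_init_ops_generic() from the IXGBE_EEC_PRES bit; callers
+ * simply use the ixgbe_read_eeprom_buffer() wrapper. An illustrative
+ * sketch (offset 0x10 and the word count are assumptions):
+ *
+ *	u16 buf[8];
+ *
+ *	if (ixgbe_read_eeprom_buffer(hw, 0x10, 8, buf) == IXGBE_SUCCESS)
+ *		... buf[0..7] hold EEPROM words 0x10 through 0x17 ...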
+ **/ +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eerd; + s32 status = IXGBE_SUCCESS; + u32 i; + + DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) + + IXGBE_EEPROM_RW_REG_START; + + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); + + if (status == IXGBE_SUCCESS) { + data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> + IXGBE_EEPROM_RW_REG_DATA); + } else { + DEBUGOUT("Eeprom read timed out\n"); + goto out; + } + } +out: + return status; +} + +/** + * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be used as a scratch pad + * + * Discover EEPROM page size by writing marching data at given offset. + * This function is called only when we are writing a new large buffer + * at given offset so the data would be overwritten anyway. + **/ +static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset) +{ + u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; + s32 status = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); + + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) + data[i] = i; + + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX; + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, + IXGBE_EEPROM_PAGE_SIZE_MAX, data); + hw->eeprom.word_page_size = 0; + if (status != IXGBE_SUCCESS) + goto out; + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); + if (status != IXGBE_SUCCESS) + goto out; + + /* + * When writing in burst more than the actual page size + * EEPROM address wraps around current page. + */ + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; + + DEBUGOUT1("Detected EEPROM page size = %d words.", + hw->eeprom.word_page_size); +out: + return status; +} + +/** + * ixgbe_read_eerd_generic - Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data); +} + +/** + * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of word(s) + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. 
+ **/ +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) +{ + u32 eewr; + s32 status = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_write_eewr_generic"); + + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | + (data[i] << IXGBE_EEPROM_RW_REG_DATA) | + IXGBE_EEPROM_RW_REG_START; + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; + } + + IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; + } + } + +out: + return status; +} + +/** + * ixgbe_write_eewr_generic - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data); +} + +/** + * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status + * @hw: pointer to hardware structure + * @ee_reg: EEPROM flag for polling + * + * Polls the status bit (bit 1) of the EERD or EEWR to determine when the + * read or write is done respectively. + **/ +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); + + for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { + if (ee_reg == IXGBE_NVM_POLL_READ) + reg = IXGBE_READ_REG(hw, IXGBE_EERD); + else + reg = IXGBE_READ_REG(hw, IXGBE_EEWR); + + if (reg & IXGBE_EEPROM_RW_REG_DONE) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(5); + } + return status; +} + +/** + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang + * @hw: pointer to hardware structure + * + * Prepares EEPROM for access using bit-bang method. This function should + * be called before issuing a command to the EEPROM. 
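+ *
+ * The bit-bang read/write helpers in this file use it in a fixed
+ * pattern, sketched here (abbreviated; opcode selection and byte
+ * addressing are simplified):
+ *
+ *	if (ixgbe_acquire_eeprom(hw) == IXGBE_SUCCESS) {
+ *		if (ixgbe_ready_eeprom(hw) == IXGBE_SUCCESS) {
+ *			ixgbe_standby_eeprom(hw);
+ *			ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+ *						    IXGBE_EEPROM_OPCODE_BITS);
+ *			ixgbe_shift_out_eeprom_bits(hw, byte_addr,
+ *						    hw->eeprom.address_bits);
+ *			word = ixgbe_shift_in_eeprom_bits(hw, 16);
+ *		}
+ *		ixgbe_release_eeprom(hw);
+ *	}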
+ **/ +static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 eec; + u32 i; + + DEBUGFUNC("ixgbe_acquire_eeprom"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) + != IXGBE_SUCCESS) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == IXGBE_SUCCESS) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* Request EEPROM Access */ + eec |= IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + if (eec & IXGBE_EEC_GNT) + break; + usec_delay(5); + } + + /* Release if grant not acquired */ + if (!(eec & IXGBE_EEC_GNT)) { + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + DEBUGOUT("Could not acquire EEPROM grant\n"); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = IXGBE_ERR_EEPROM; + } + + /* Setup EEPROM for Read/Write */ + if (status == IXGBE_SUCCESS) { + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); + } + } + return status; +} + +/** + * ixgbe_get_eeprom_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + DEBUGFUNC("ixgbe_get_eeprom_semaphore"); + + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(50); + } + + if (i == timeout) { + DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " + "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there + * was a timeout, we should unconditionally clear the semaphore + * bits to free the driver to make progress + */ + ixgbe_release_eeprom_semaphore(hw); + + usec_delay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) + status = IXGBE_SUCCESS; + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ + if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + + /* + * If we set the bit successfully then we got the + * semaphore. + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (swsm & IXGBE_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + /* + * Release semaphores and return error if SW EEPROM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + DEBUGOUT("SWESMBI Software EEPROM semaphore " + "not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + DEBUGOUT("Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ixgbe_release_eeprom_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. 
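+ *
+ * At the driver-facing API the related protection is the SWFW
+ * semaphore; an illustrative sketch for the EEPROM resource:
+ *
+ *	if (ixgbe_acquire_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM) ==
+ *	    IXGBE_SUCCESS) {
+ *		... touch the shared EEPROM interface ...
+ *		ixgbe_release_swfw_semaphore(hw, IXGBE_GSSR_EEP_SM);
+ *	}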
+ **/ +static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("ixgbe_release_eeprom_semaphore"); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ + swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_ready_eeprom - Polls for EEPROM ready + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 i; + u8 spi_stat_reg; + + DEBUGFUNC("ixgbe_ready_eeprom"); + + /* + * Read "Status Register" repeatedly until the LSB is cleared. The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, + IXGBE_EEPROM_OPCODE_BITS); + spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + ixgbe_standby_eeprom(hw); + }; + + /* + * On some parts, SPI write time could vary from 0-20mSec on 3.3V + * devices (and only 0-5mSec on 5V devices) + */ + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { + DEBUGOUT("SPI EEPROM Status error\n"); + status = IXGBE_ERR_EEPROM; + } + + return status; +} + +/** + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: pointer to hardware structure + **/ +static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + DEBUGFUNC("ixgbe_standby_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* Toggle CS to flush commands */ + eec |= IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); + eec &= ~IXGBE_EEC_CS; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); +} + +/** + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. + * @hw: pointer to hardware structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + **/ +static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count) +{ + u32 eec; + u32 mask; + u32 i; + + DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + /* + * Mask is used to shift "count" bits of "data" out to the EEPROM + * one bit at a time. Determine the starting bit based on count + */ + mask = 0x01 << (count - 1); + + for (i = 0; i < count; i++) { + /* + * A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK + * bit controls the clock input to the EEPROM). A "0" is + * shifted out to the EEPROM by setting "DI" to "0" and then + * raising and then lowering the clock. + */ + if (data & mask) + eec |= IXGBE_EEC_DI; + else + eec &= ~IXGBE_EEC_DI; + + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + + usec_delay(1); + + ixgbe_raise_eeprom_clk(hw, &eec); + ixgbe_lower_eeprom_clk(hw, &eec); + + /* + * Shift mask to signify next bit of data to shift in to the + * EEPROM + */ + mask = mask >> 1; + }; + + /* We leave the "DI" bit set to "0" when we leave this routine. 
*/ + eec &= ~IXGBE_EEC_DI; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM + * @hw: pointer to hardware structure + **/ +static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) +{ + u32 eec; + u32 i; + u16 data = 0; + + DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); + + /* + * In order to read a register from the EEPROM, we need to shift + * 'count' bits in from the EEPROM. Bits are "shifted in" by raising + * the clock input to the EEPROM (setting the SK bit), and then reading + * the value of the "DO" bit. During this "shifting in" process the + * "DI" bit should always be clear. + */ + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); + + for (i = 0; i < count; i++) { + data = data << 1; + ixgbe_raise_eeprom_clk(hw, &eec); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec &= ~(IXGBE_EEC_DI); + if (eec & IXGBE_EEC_DO) + data |= 1; + + ixgbe_lower_eeprom_clk(hw, &eec); + } + + return data; +} + +/** + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eec: EEC register's current value + **/ +static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + DEBUGFUNC("ixgbe_raise_eeprom_clk"); + + /* + * Raise the clock input to the EEPROM + * (setting the SK bit), then delay + */ + *eec = *eec | IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); +} + +/** + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. + * @hw: pointer to hardware structure + * @eecd: EECD's current value + **/ +static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) +{ + DEBUGFUNC("ixgbe_lower_eeprom_clk"); + + /* + * Lower the clock input to the EEPROM (clearing the SK bit), then + * delay + */ + *eec = *eec & ~IXGBE_EEC_SK; + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_FLUSH(hw); + usec_delay(1); +} + +/** + * ixgbe_release_eeprom - Release EEPROM, release semaphores + * @hw: pointer to hardware structure + **/ +static void ixgbe_release_eeprom(struct ixgbe_hw *hw) +{ + u32 eec; + + DEBUGFUNC("ixgbe_release_eeprom"); + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + + eec |= IXGBE_EEC_CS; /* Pull CS high */ + eec &= ~IXGBE_EEC_SK; /* Lower SCK */ + + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_FLUSH(hw); + + usec_delay(1); + + /* Stop requesting EEPROM access */ + eec &= ~IXGBE_EEC_REQ; + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + /* Delay before attempt to obtain semaphore again to allow FW access */ + msec_delay(hw->eeprom.semaphore_delay); +} + +/** + * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum + * @hw: pointer to hardware structure + **/ +u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic"); + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + break; + } + checksum += word; + } + + /* Include all data from pointers except for the fw pointer */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + hw->eeprom.ops.read(hw, i, &pointer); + + /* Make sure the pointer seems valid */ + if (pointer != 0xFFFF && pointer != 0) { + hw->eeprom.ops.read(hw, pointer, &length); + + if (length 
!= 0xFFFF && length != 0) { + for (j = pointer+1; j <= pointer+length; j++) { + hw->eeprom.ops.read(hw, j, &word); + checksum += word; + } + } + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + + if (status == IXGBE_SUCCESS) { + checksum = hw->eeprom.ops.calc_checksum(hw); + + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); + + /* + * Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + } else { + DEBUGOUT("EEPROM read failed\n"); + } + + return status; +} + +/** + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum + * @hw: pointer to hardware structure + **/ +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + + if (status == IXGBE_SUCCESS) { + checksum = hw->eeprom.ops.calc_checksum(hw); + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + } else { + DEBUGOUT("EEPROM read failed\n"); + } + + return status; +} + +/** + * ixgbe_validate_mac_addr - Validate MAC address + * @mac_addr: pointer to MAC address. + * + * Tests a MAC address to ensure it is a valid Individual Address + **/ +s32 ixgbe_validate_mac_addr(u8 *mac_addr) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_validate_mac_addr"); + + /* Make sure it is not a multicast address */ + if (IXGBE_IS_MULTICAST(mac_addr)) { + DEBUGOUT("MAC address is multicast\n"); + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Not a broadcast address */ + } else if (IXGBE_IS_BROADCAST(mac_addr)) { + DEBUGOUT("MAC address is broadcast\n"); + status = IXGBE_ERR_INVALID_MAC_ADDR; + /* Reject the zero address */ + } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { + DEBUGOUT("MAC address is all zeros\n"); + status = IXGBE_ERR_INVALID_MAC_ADDR; + } + return status; +} + +/** + * ixgbe_set_rar_generic - Set Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + * + * Puts an ethernet address into a receive address register. 
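+ *
+ * For example (illustrative values only): the address 00:1b:21:3c:4d:5e
+ * is stored as RAL = 0x3C211B00 and the low 16 bits of RAH = 0x5E4D,
+ * with IXGBE_RAH_AV OR-ed in when enable_addr is non-zero.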
+ **/ +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_set_rar_generic"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* setup VMDq pool selection before this RAR gets enabled */ + hw->mac.ops.set_vmdq(hw, index, vmdq); + + /* + * HW expects these in little endian so we reverse the byte + * order from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | + ((u32)addr[3] << 24)); + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); + + if (enable_addr != 0) + rar_high |= IXGBE_RAH_AV; + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_rar_generic - Remove Rx address register + * @hw: pointer to hardware structure + * @index: Receive address register to write + * + * Clears an ethernet address from a receive address register. + **/ +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_clear_rar_generic"); + + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + /* + * Some parts put the VMDq setting in the extra RAH bits, + * so save everything except the lower 16 bits that hold part + * of the address and the address valid bit. + */ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); + + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_rx_addrs_generic - Initializes receive address filters. + * @hw: pointer to hardware structure + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + **/ +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) +{ + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_init_rx_addrs_generic"); + + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ + if (ixgbe_validate_mac_addr(hw->mac.addr) == + IXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. 
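+ * RAR[0] is programmed with hw->mac.addr below, and its VMDq
+ * pool selection is then cleared.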
*/ + DEBUGOUT("Overriding MAC Address in RAR[0]\n"); + DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", + hw->mac.addr[0], hw->mac.addr[1], + hw->mac.addr[2]); + DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], + hw->mac.addr[4], hw->mac.addr[5]); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + } + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ + DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); + } + + /* Clear the MTA */ + hw->addr_ctrl.mta_in_use = 0; + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + DEBUGOUT(" Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + + ixgbe_init_uta_tables(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_add_uc_addr - Adds a secondary unicast address. + * @hw: pointer to hardware structure + * @addr: new address + * + * Adds it to unused receive address register or goes into promiscuous mode. + **/ +void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + u32 rar_entries = hw->mac.num_rar_entries; + u32 rar; + + DEBUGFUNC("ixgbe_add_uc_addr"); + + DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + + /* + * Place this address in the RAR if there is room, + * else put the controller into promiscuous mode + */ + if (hw->addr_ctrl.rar_used_count < rar_entries) { + rar = hw->addr_ctrl.rar_used_count; + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); + hw->addr_ctrl.rar_used_count++; + } else { + hw->addr_ctrl.overflow_promisc++; + } + + DEBUGOUT("ixgbe_add_uc_addr Complete\n"); +} + +/** + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses + * @hw: pointer to hardware structure + * @addr_list: the list of new addresses + * @addr_count: number of addresses + * @next: iterator function to walk the address list + * + * The given list replaces any existing list. Clears the secondary addrs from + * receive address registers. Uses unused receive address registers for the + * first secondary addresses, and falls back to promiscuous mode as needed. + * + * Drivers using secondary unicast addresses must set user_set_promisc when + * manually putting the device into promiscuous mode. 
+ **/ +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr next) +{ + u8 *addr; + u32 i; + u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; + u32 uc_addr_in_use; + u32 fctrl; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); + + /* + * Clear accounting of old secondary address list, + * don't count RAR[0] + */ + uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; + hw->addr_ctrl.rar_used_count -= uc_addr_in_use; + hw->addr_ctrl.overflow_promisc = 0; + + /* Zero out the other receive addresses */ + DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); + for (i = 0; i < uc_addr_in_use; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); + } + + /* Add the new addresses */ + for (i = 0; i < addr_count; i++) { + DEBUGOUT(" Adding the secondary addresses:\n"); + addr = next(hw, &addr_list, &vmdq); + ixgbe_add_uc_addr(hw, addr, vmdq); + } + + if (hw->addr_ctrl.overflow_promisc) { + /* enable promisc if not already in overflow or set by user */ + if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Entering address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } else { + /* only disable if set by overflow, not by user */ + if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { + DEBUGOUT(" Leaving address overflow promisc mode\n"); + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= ~IXGBE_FCTRL_UPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + } + } + + DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + DEBUGFUNC("ixgbe_mta_vector"); + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbe_set_mta - Set bit-vector in multicast table + * @hw: pointer to hardware structure + * @hash_value: Multicast address hash value + * + * Sets the bit-vector in the multicast table. 
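+ *
+ * Worked example (illustrative, assuming mc_filter_type 0): for the
+ * multicast address 01:00:5e:00:00:fb, ixgbe_mta_vector() yields
+ * (0x00 >> 4) | (0xfb << 4) = 0xFB0, so bit 16 of MTA register 125 is
+ * set in the mta_shadow copy (0xFB0 >> 5 = 125, 0xFB0 & 0x1F = 16).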
+ **/ +void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector; + u32 vector_bit; + u32 vector_reg; + + DEBUGFUNC("ixgbe_set_mta"); + + hw->addr_ctrl.mta_in_use++; + + vector = ixgbe_mta_vector(hw, mc_addr); + DEBUGOUT1(" bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated + * like an array of 4096 bits. We want to set bit + * BitArray[vector_value]. So we figure out what register the bit is + * in, read it, OR in the new bit, then write back the new value. The + * register is determined by the upper 7 bits of the vector value and + * the bit within that register are determined by the lower 5 bits of + * the value. + */ + vector_reg = (vector >> 5) & 0x7F; + vector_bit = vector & 0x1F; + hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit); +} + +/** + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure + * @mc_addr_list: the list of new multicast addresses + * @mc_addr_count: number of addresses + * @next: iterator function to walk the multicast address list + * @clear: flag, when set clears the table beforehand + * + * When the clear flag is set, the given list replaces any existing list. + * Hashes the given addresses into the multicast table. + **/ +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + u32 i; + u32 vmdq; + + DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ + hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ + if (clear) { + DEBUGOUT(" Clearing MTA\n"); + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + } + + /* Update mta_shadow */ + for (i = 0; i < mc_addr_count; i++) { + DEBUGOUT(" Adding the multicast addresses:\n"); + ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + } + + /* Enable mta */ + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i, + hw->mac.mta_shadow[i]); + + if (hw->addr_ctrl.mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + + DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_mc_generic - Enable multicast address in RAR + * @hw: pointer to hardware structure + * + * Enables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + DEBUGFUNC("ixgbe_enable_mc_generic"); + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | + hw->mac.mc_filter_type); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_disable_mc_generic - Disable multicast address in RAR + * @hw: pointer to hardware structure + * + * Disables multicast address in RAR and the use of the multicast hash table. + **/ +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + + DEBUGFUNC("ixgbe_disable_mc_generic"); + + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_fc_enable_generic - Enable flow control + * @hw: pointer to hardware structure + * @packetbuf_num: packet buffer number (0-7) + * + * Enable flow control according to the current settings. 
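+ *
+ * The mode is first resolved through ixgbe_fc_autoneg(); the result is
+ * then applied by programming MFLCN/FCCFG, the per-buffer FCRTH/FCRTL
+ * thresholds and the FCTTV pause timers.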
+ **/ +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 fcrtl, fcrth; + + DEBUGFUNC("ixgbe_fc_enable_generic"); + + /* Negotiate the fc mode to use */ + ret_val = ixgbe_fc_autoneg(hw); + if (ret_val == IXGBE_ERR_FLOW_CONTROL) + goto out; + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); + mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); + + fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); + + /* + * The possible values of fc.current_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.current_mode) { + case ixgbe_fc_none: + /* + * Flow control is disabled by software override or autoneg. + * The code below will actually disable it in the HW. + */ + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + mflcn_reg |= IXGBE_MFLCN_RFCE; + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + /* Set 802.3x based flow control settings. */ + mflcn_reg |= IXGBE_MFLCN_DPF; + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + + fcrth = hw->fc.high_water[packetbuf_num] << 10; + fcrtl = hw->fc.low_water << 10; + + if (hw->fc.current_mode & ixgbe_fc_tx_pause) { + fcrth |= IXGBE_FCRTH_FCEN; + if (hw->fc.send_xon) + fcrtl |= IXGBE_FCRTL_XONE; + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); + + /* Configure pause time (2 TCs per register) */ + reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); + if ((packetbuf_num & 1) == 0) + reg = (reg & 0xFFFF0000) | hw->fc.pause_time; + else + reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16); + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg); + + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg - Configure flow control + * @hw: pointer to hardware structure + * + * Compares our advertised flow control capabilities to those advertised by + * our link partner, and determines the proper flow control mode to use. + **/ +s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; + + DEBUGFUNC("ixgbe_fc_autoneg"); + + if (hw->fc.disable_fc_autoneg) + goto out; + + /* + * AN should have completed when the cable was plugged in. 
+ * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. + * + * Since we're being called from an LSC, link is already known to be up. + * So use link_up_wait_to_complete=FALSE. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); + if (!link_up) { + ret_val = IXGBE_ERR_FLOW_CONTROL; + goto out; + } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ + case ixgbe_media_type_fiber: + if (speed == IXGBE_LINK_SPEED_1GB_FULL) + ret_val = ixgbe_fc_autoneg_fiber(hw); + break; + + /* Autoneg flow control on backplane adapters */ + case ixgbe_media_type_backplane: + ret_val = ixgbe_fc_autoneg_backplane(hw); + break; + + /* Autoneg flow control on copper adapters */ + case ixgbe_media_type_copper: + if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS) + ret_val = ixgbe_fc_autoneg_copper(hw); + break; + + default: + break; + } + +out: + if (ret_val == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = TRUE; + } else { + hw->fc.fc_was_autonegged = FALSE; + hw->fc.current_mode = hw->fc.requested_mode; + } + return ret_val; +} + +/** + * ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber + * @hw: pointer to hardware structure + * + * Enable flow control according on 1 gig fiber. + **/ +static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) +{ + u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; + s32 ret_val; + + /* + * On multispeed fiber at 1g, bail out if + * - link is up but AN did not complete, or if + * - link is up and AN completed but timed out + */ + + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || + (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + + ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, + pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE, + IXGBE_PCS1GANA_SYM_PAUSE, + IXGBE_PCS1GANA_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. 
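+ * The local advertisement is taken from AUTOC and the link partner's
+ * from ANLP1; the two are then resolved by ixgbe_negotiate_fc().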
+ **/ +static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) +{ + u32 links2, anlp1_reg, autoc_reg, links; + s32 ret_val; + + /* + * On backplane, bail out if + * - backplane autoneg was not completed, or if + * - we are 82599 and link partner is not AN enabled + */ + links = IXGBE_READ_REG(hw, IXGBE_LINKS); + if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { + hw->fc.fc_was_autonegged = FALSE; + hw->fc.current_mode = hw->fc.requested_mode; + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + + if (hw->mac.type == ixgbe_mac_82599EB) { + links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); + if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { + hw->fc.fc_was_autonegged = FALSE; + hw->fc.current_mode = hw->fc.requested_mode; + ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; + goto out; + } + } + /* + * Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + + ret_val = ixgbe_negotiate_fc(hw, autoc_reg, + anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE, + IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE); + +out: + return ret_val; +} + +/** + * ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) +{ + u16 technology_ability_reg = 0; + u16 lp_technology_ability_reg = 0; + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &technology_ability_reg); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &lp_technology_ability_reg); + + return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, + (u32)lp_technology_ability_reg, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE, + IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE); +} + +/** + * ixgbe_negotiate_fc - Negotiate flow control + * @hw: pointer to hardware structure + * @adv_reg: flow control advertised settings + * @lp_reg: link partner's flow control settings + * @adv_sym: symmetric pause bit in advertisement + * @adv_asm: asymmetric pause bit in advertisement + * @lp_sym: symmetric pause bit in link partner advertisement + * @lp_asm: asymmetric pause bit in link partner advertisement + * + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) +{ + if ((!(adv_reg)) || (!(lp_reg))) + return IXGBE_ERR_FC_NOT_NEGOTIATED; + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* + * Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise RX + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. 
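+ *
+ * The checks below implement the usual IEEE 802.3 pause resolution
+ * (summarised here for reference; Sym/Asm are the PAUSE bits of the
+ * local and link partner advertisements):
+ *
+ *	local Sym, partner Sym          -> full (or rx_pause if only
+ *	                                   rx_pause was requested)
+ *	local Asm only, partner Sym+Asm -> tx_pause
+ *	local Sym+Asm, partner Asm only -> rx_pause
+ *	anything else                   -> none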
+ */ + if (hw->fc.requested_mode == ixgbe_fc_full) { + hw->fc.current_mode = ixgbe_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_rx_pause; + DEBUGOUT("Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_tx_pause; + DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_rx_pause; + DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_setup_fc - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) +{ + s32 ret_val = IXGBE_SUCCESS; + u32 reg = 0, reg_bp = 0; + u16 reg_cu = 0; + + DEBUGFUNC("ixgbe_setup_fc"); + + /* Validate the packetbuf configuration */ + if (packetbuf_num < 0 || packetbuf_num > 7) { + DEBUGOUT1("Invalid packet buffer number [%d], expected range " + "is 0-7\n", packetbuf_num); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * Validate the water mark configuration. Zero water marks are invalid + * because it causes the controller to just blast out fc packets. + */ + if (!hw->fc.low_water || + !hw->fc.high_water[packetbuf_num] || + !hw->fc.pause_time) { + DEBUGOUT("Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * Validate the requested mode. Strict IEEE mode does not allow + * ixgbe_fc_rx_pause because it will cause us to fail at UNH. + */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* + * 10gig parts do not have a word in the EEPROM to determine the + * default flow control setting, so we explicitly set it to full. + */ + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* + * Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do fc autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + + switch (hw->phy.media_type) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_backplane: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); + break; + + case ixgbe_media_type_copper: + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); + break; + + default: + ; + } + + /* + * The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. 
*/ + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + case ixgbe_fc_rx_pause: + /* + * Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* + * Tx Flow control is enabled, and Rx Flow control is + * disabled by software override. + */ + reg |= (IXGBE_PCS1GANA_ASM_PAUSE); + reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= (IXGBE_AUTOC_ASM_PAUSE); + reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE); + } else if (hw->phy.media_type == ixgbe_media_type_copper) { + reg_cu |= (IXGBE_TAF_ASM_PAUSE); + reg_cu &= ~(IXGBE_TAF_SYM_PAUSE); + } + break; + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE); + if (hw->phy.media_type == ixgbe_media_type_backplane) + reg_bp |= (IXGBE_AUTOC_SYM_PAUSE | + IXGBE_AUTOC_ASM_PAUSE); + else if (hw->phy.media_type == ixgbe_media_type_copper) + reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + + if (hw->mac.type != ixgbe_mac_X540) { + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. + */ + IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg); + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); + + /* Disable AN timeout */ + if (hw->fc.strict_ieee) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); + DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); + } + + /* + * AUTOC restart handles negotiation of 1G and 10G on backplane + * and copper. There is no need to set the PCS1GCTL register. + * + */ + if (hw->phy.media_type == ixgbe_media_type_backplane) { + reg_bp |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp); + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && + (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) { + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); + } + + DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); +out: + return ret_val; +} + +/** + * ixgbe_disable_pcie_master - Disable PCI-express master access + * @hw: pointer to hardware structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable + * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS + * is returned signifying master requests disabled. 
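+ *
+ * If the master-disable bit does not take effect, the routine sets
+ * IXGBE_FLAGS_DOUBLE_RESET_REQUIRED so that the reset path issues two
+ * back-to-back CTRL.RST cycles, as required by the datasheet.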
+ **/ +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 i; + + DEBUGFUNC("ixgbe_disable_pcie_master"); + + /* Always set this bit to ensure any future transactions are blocked */ + IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); + + /* Exit if master requets are blocked */ + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + goto out; + + /* Poll for master request bit to clear */ + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + usec_delay(100); + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + goto out; + } + + /* + * Two consecutive resets are required via CTRL.RST per datasheet + * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine + * of this need. The first reset prevents new master requests from + * being issued by our device. We then must wait 1usec or more for any + * remaining completions from the PCIe bus to trickle in, and then reset + * again to clear out any effects they may have had on our device. + */ + DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); + hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. + */ + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { + usec_delay(100); + if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) & + IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + goto out; + } + + DEBUGOUT("PCIe transaction pending bit also did not clear.\n"); + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + +out: + return status; +} + +/** + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ + u32 gssr; + u32 swmask = mask; + u32 fwmask = mask << 5; + s32 timeout = 200; + + DEBUGFUNC("ixgbe_acquire_swfw_sync"); + + while (timeout) { + /* + * SW EEPROM semaphore bit is used for access to all + * SW_FW_SYNC/GSSR bits (not just EEPROM) + */ + if (ixgbe_get_eeprom_semaphore(hw)) + return IXGBE_ERR_SWFW_SYNC; + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + if (!(gssr & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) or other software + * thread currently using resource (swmask) + */ + ixgbe_release_eeprom_semaphore(hw); + msec_delay(5); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + return IXGBE_ERR_SWFW_SYNC; + } + + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_release_swfw_sync - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) +{ + u32 gssr; + u32 swmask = mask; + + DEBUGFUNC("ixgbe_release_swfw_sync"); + + ixgbe_get_eeprom_semaphore(hw); + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); + gssr &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + + ixgbe_release_eeprom_semaphore(hw); +} + +/** + * ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit + * @hw: pointer to hardware structure + * @regval: register value to write to 
RXCTRL + * + * Enables the Rx DMA unit + **/ +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) +{ + DEBUGFUNC("ixgbe_enable_rx_dma_generic"); + + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_blink_led_start_generic - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + **/ +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = 0; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + DEBUGFUNC("ixgbe_blink_led_start_generic"); + + /* + * Link must be up to auto-blink the LEDs; + * Force it if link is down. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, FALSE); + + if (!link_up) { + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + autoc_reg |= IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + IXGBE_WRITE_FLUSH(hw); + msec_delay(10); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + **/ +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + DEBUGFUNC("ixgbe_blink_led_stop_generic"); + + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_offset: SAN MAC address offset + * + * This function will read the EEPROM location for the SAN MAC address + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. + **/ +static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, + u16 *san_mac_offset) +{ + DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. + */ + hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Reads the SAN MAC address from the EEPROM, if it's available. This is + * per-port, so set_lan_id() must be called before reading the addresses. + * set_lan_id() is called by identify_sfp(), but this cannot be relied + * upon for non-SFP connections, so we must call it here. + **/ +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + u16 san_mac_data, san_mac_offset; + u8 i; + + DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); + + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. + */ + ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + + if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { + /* + * No addresses available in this EEPROM. 
It's not an + * error though, so just wipe the local address and return. + */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; + + goto san_mac_addr_out; + } + + /* make sure we know which port we need to program */ + hw->mac.ops.set_lan_id(hw); + /* apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + for (i = 0; i < 3; i++) { + hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data); + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } + +san_mac_addr_out: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM + * @hw: pointer to hardware structure + * @san_mac_addr: SAN MAC address + * + * Write a SAN MAC address to the EEPROM. + **/ +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) +{ + s32 status = IXGBE_SUCCESS; + u16 san_mac_data, san_mac_offset; + u8 i; + + DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); + + /* Look for SAN mac address pointer. If not defined, return */ + ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + + if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) { + status = IXGBE_ERR_NO_SAN_ADDR_PTR; + goto san_mac_addr_out; + } + + /* Make sure we know which port we need to write */ + hw->mac.ops.set_lan_id(hw); + /* Apply the port offset to the address offset */ + (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : + (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); + + for (i = 0; i < 3; i++) { + san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); + san_mac_data |= (u16)(san_mac_addr[i * 2]); + hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); + san_mac_offset++; + } + +san_mac_addr_out: + return status; +} + +/** + * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count + * @hw: pointer to hardware structure + * + * Read PCIe configuration space, and get the MSI-X vector count from + * the capabilities table. + **/ +u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) +{ + u32 msix_count = 64; + + DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); + if (hw->mac.msix_vectors_from_pcie) { + msix_count = IXGBE_READ_PCIE_WORD(hw, + IXGBE_PCIE_MSIX_82599_CAPS); + msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; + + /* MSI-X count is zero-based in HW, so increment to give + * proper value */ + msix_count++; + } + + return msix_count; +} + +/** + * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address + * @hw: pointer to hardware structure + * @addr: Address to put into receive address register + * @vmdq: VMDq pool to assign + * + * Puts an ethernet address into a receive address register, or + * finds the rar that it is aleady in; adds to the pool list + **/ +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) +{ + static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; + u32 first_empty_rar = NO_EMPTY_RAR_FOUND; + u32 rar; + u32 rar_low, rar_high; + u32 addr_low, addr_high; + + DEBUGFUNC("ixgbe_insert_mac_addr_generic"); + + /* swap bytes for HW little endian */ + addr_low = addr[0] | (addr[1] << 8) + | (addr[2] << 16) + | (addr[3] << 24); + addr_high = addr[4] | (addr[5] << 8); + + /* + * Either find the mac_id in rar or find the first empty space. + * rar_highwater points to just after the highest currently used + * rar in order to shorten the search. It grows when we add a new + * rar to the top. 
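+ * On success the routine returns the RAR index that was used for this
+ * address; IXGBE_ERR_INVALID_MAC_ADDR is returned when the table is
+ * already full.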
+ */ + for (rar = 0; rar < hw->mac.rar_highwater; rar++) { + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); + + if (((IXGBE_RAH_AV & rar_high) == 0) + && first_empty_rar == NO_EMPTY_RAR_FOUND) { + first_empty_rar = rar; + } else if ((rar_high & 0xFFFF) == addr_high) { + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); + if (rar_low == addr_low) + break; /* found it already in the rars */ + } + } + + if (rar < hw->mac.rar_highwater) { + /* already there so just add to the pool bits */ + ixgbe_set_vmdq(hw, rar, vmdq); + } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { + /* stick it into first empty RAR slot we found */ + rar = first_empty_rar; + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + } else if (rar == hw->mac.rar_highwater) { + /* add it to the top of the list and inc the highwater mark */ + ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); + hw->mac.rar_highwater++; + } else if (rar >= hw->mac.num_rar_entries) { + return IXGBE_ERR_INVALID_MAC_ADDR; + } + + /* + * If we found rar[0], make sure the default pool bit (we use pool 0) + * remains cleared to be sure default pool packets will get delivered + */ + if (rar == 0) + ixgbe_clear_vmdq(hw, rar, 0); + + return rar; +} + +/** + * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate + * @vmdq: VMDq pool index to remove from the rar + **/ +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar_lo, mpsar_hi; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_clear_vmdq_generic"); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + + if (!mpsar_lo && !mpsar_hi) + goto done; + + if (vmdq == IXGBE_CLEAR_VMDQ_ALL) { + if (mpsar_lo) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0); + mpsar_lo = 0; + } + if (mpsar_hi) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); + mpsar_hi = 0; + } + } else if (vmdq < 32) { + mpsar_lo &= ~(1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo); + } else { + mpsar_hi &= ~(1 << (vmdq - 32)); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi); + } + + /* was that the last pool using this rar? 
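+ * If so, the whole entry is released via clear_rar; RAR[0] is left in
+ * place since it holds the primary MAC address.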
*/ + if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) + hw->mac.ops.clear_rar(hw, rar); +done: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq pool index + **/ +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) +{ + u32 mpsar; + u32 rar_entries = hw->mac.num_rar_entries; + + DEBUGFUNC("ixgbe_set_vmdq_generic"); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { + DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + if (vmdq < 32) { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar |= 1 << vmdq; + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar); + } else { + mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); + mpsar |= 1 << (vmdq - 32); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); + } + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array + * @hw: pointer to hardware structure + **/ +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) +{ + int i; + + DEBUGFUNC("ixgbe_init_uta_tables_generic"); + DEBUGOUT(" Clearing UTA\n"); + + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * + * return the VLVF index where this VLAN id should be placed + * + **/ +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) +{ + u32 bits = 0; + u32 first_empty_slot = 0; + s32 regindex; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* + * Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way + */ + for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); + if (!bits && !(first_empty_slot)) + first_empty_slot = regindex; + else if ((bits & 0x0FFF) == vlan) + break; + } + + /* + * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan + * in the VLVF. Else use the first empty VLVF register for this + * vlan id. + */ + if (regindex >= IXGBE_VLVF_ENTRIES) { + if (first_empty_slot) + regindex = first_empty_slot; + else { + DEBUGOUT("No space in VLVF.\n"); + regindex = IXGBE_ERR_NO_SPACE; + } + } + + return regindex; +} + +/** + * ixgbe_set_vfta_generic - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFVFB + * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on) +{ + s32 regindex; + u32 bitindex; + u32 vfta; + u32 bits; + u32 vt; + u32 targetbit; + bool vfta_changed = FALSE; + + DEBUGFUNC("ixgbe_set_vfta_generic"); + + if (vlan > 4095) + return IXGBE_ERR_PARAM; + + /* + * this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. 
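+ *
+ * As a concrete example of the VFTA indexing used in Part 1 below:
+ * VLAN 100 maps to VFTA[3], bit 4 (100 >> 5 = 3, 100 & 0x1F = 4).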
+ */ + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regindex = (vlan >> 5) & 0x7F; + bitindex = vlan & 0x1F; + targetbit = (1 << bitindex); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); + + if (vlan_on) { + if (!(vfta & targetbit)) { + vfta |= targetbit; + vfta_changed = TRUE; + } + } else { + if ((vfta & targetbit)) { + vfta &= ~targetbit; + vfta_changed = TRUE; + } + } + + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); + if (vt & IXGBE_VT_CTL_VT_ENABLE) { + s32 vlvf_index; + + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); + if (vlvf_index < 0) + return vlvf_index; + + if (vlan_on) { + /* set the pool bit */ + if (vind < 32) { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index*2)); + bits |= (1 << vind); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB(vlvf_index*2), + bits); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1)); + bits |= (1 << (vind-32)); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1), + bits); + } + } else { + /* clear the pool bit */ + if (vind < 32) { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index*2)); + bits &= ~(1 << vind); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB(vlvf_index*2), + bits); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1)); + } else { + bits = IXGBE_READ_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1)); + bits &= ~(1 << (vind-32)); + IXGBE_WRITE_REG(hw, + IXGBE_VLVFB((vlvf_index*2)+1), + bits); + bits |= IXGBE_READ_REG(hw, + IXGBE_VLVFB(vlvf_index*2)); + } + } + + /* + * If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. + */ + if (bits) { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), + (IXGBE_VLVF_VIEN | vlan)); + if (!vlan_on) { + /* someone wants to clear the vfta entry + * but some pools/VFs are still using it. + * Ignore it. 
*/ + vfta_changed = FALSE; + } + } + else + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + } + + if (vfta_changed) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to hardware structure + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) +{ + u32 offset; + + DEBUGFUNC("ixgbe_clear_vfta_generic"); + + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_mac_link_generic - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: TRUE when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete) +{ + u32 links_reg, links_orig; + u32 i; + + DEBUGFUNC("ixgbe_check_mac_link_generic"); + + /* clear the old state */ + links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + + if (links_orig != links_reg) { + DEBUGOUT2("LINKS changed from %08X to %08X\n", + links_orig, links_reg); + } + + if (link_up_wait_to_complete) { + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = TRUE; + break; + } else { + *link_up = FALSE; + } + msec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { + if (links_reg & IXGBE_LINKS_UP) + *link_up = TRUE; + else + *link_up = FALSE; + } + + if ((links_reg & IXGBE_LINKS_SPEED_82599) == + IXGBE_LINKS_SPEED_10G_82599) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else if ((links_reg & IXGBE_LINKS_SPEED_82599) == + IXGBE_LINKS_SPEED_1G_82599) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + else if ((links_reg & IXGBE_LINKS_SPEED_82599) == + IXGBE_LINKS_SPEED_100_82599) + *speed = IXGBE_LINK_SPEED_100_FULL; + else + *speed = IXGBE_LINK_SPEED_UNKNOWN; + + /* if link is down, zero out the current_mode */ + if (*link_up == FALSE) { + hw->fc.current_mode = ixgbe_fc_none; + hw->fc.fc_was_autonegged = FALSE; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from + * the EEPROM + * @hw: pointer to hardware structure + * @wwnn_prefix: the alternative WWNN prefix + * @wwpn_prefix: the alternative WWPN prefix + * + * This function will read the EEPROM from the alternative SAN MAC address + * block to check the support for the alternative WWNN/WWPN prefix support. 
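+ * Both prefixes are returned as 0xFFFF when the alternative SAN MAC
+ * block is absent or does not advertise the ALTWWN capability.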
+ **/ +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix) +{ + u16 offset, caps; + u16 alt_san_mac_blk_offset; + + DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); + + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; + + /* check if alternative SAN MAC is supported */ + hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR, + &alt_san_mac_blk_offset); + + if ((alt_san_mac_blk_offset == 0) || + (alt_san_mac_blk_offset == 0xFFFF)) + goto wwn_prefix_out; + + /* check capability in alternative san mac address block */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET; + hw->eeprom.ops.read(hw, offset, &caps); + if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN)) + goto wwn_prefix_out; + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; + hw->eeprom.ops.read(hw, offset, wwnn_prefix); + + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + hw->eeprom.ops.read(hw, offset, wwpn_prefix); + +wwn_prefix_out: + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM + * @hw: pointer to hardware structure + * @bs: the fcoe boot status + * + * This function will read the FCOE boot status from the iSCSI FCOE block + **/ +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) +{ + u16 offset, caps, flags; + s32 status; + + DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); + + /* clear output first */ + *bs = ixgbe_fcoe_bootstatus_unavailable; + + /* check if FCOE IBA block is present */ + offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; + status = hw->eeprom.ops.read(hw, offset, &caps); + if (status != IXGBE_SUCCESS) + goto out; + + if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) + goto out; + + /* check if iSCSI FCOE block is populated */ + status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); + if (status != IXGBE_SUCCESS) + goto out; + + if ((offset == 0) || (offset == 0xFFFF)) + goto out; + + /* read fcoe flags in iSCSI FCOE block */ + offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; + status = hw->eeprom.ops.read(hw, offset, &flags); + if (status != IXGBE_SUCCESS) + goto out; + + if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) + *bs = ixgbe_fcoe_bootstatus_enabled; + else + *bs = ixgbe_fcoe_bootstatus_disabled; + +out: + return status; +} + +/** + * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow + * control + * @hw: pointer to hardware structure + * + * There are several phys that do not support autoneg flow control. This + * function checks the device id to see if the associated phy supports + * autoneg flow control.
+ **/ +static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) +{ + + DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); + + switch (hw->device_id) { + case IXGBE_DEV_ID_X540T: + return IXGBE_SUCCESS; + case IXGBE_DEV_ID_82599_T3_LOM: + return IXGBE_SUCCESS; + default: + return IXGBE_ERR_FC_NOT_SUPPORTED; + } +} + +/** + * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for anti-spoofing + * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * + **/ +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +{ + int j; + int pf_target_reg = pf >> 3; + int pf_target_shift = pf % 8; + u32 pfvfspoof = 0; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + if (enable) + pfvfspoof = IXGBE_SPOOF_MACAS_MASK; + + /* + * PFVFSPOOF register array is size 8 with 8 bits assigned to + * MAC anti-spoof enables in each register array element. + */ + for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); + + /* If not enabling anti-spoofing then done */ + if (!enable) + return; + + /* + * The PF should be allowed to spoof so that it can support + * emulation mode NICs. Reset the bit assigned to the PF + */ + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg)); + pfvfspoof ^= (1 << pf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof); +} + +/** + * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +{ + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT; + u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); +} + +/** + * ixgbe_get_device_caps_generic - Get additional device capabilities + * @hw: pointer to hardware structure + * @device_caps: the EEPROM word with the extra device capabilities + * + * This function will read the EEPROM location for the device capabilities, + * and return the word through device_caps.
+ **/ +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) +{ + DEBUGFUNC("ixgbe_get_device_caps_generic"); + + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering + * @hw: pointer to hardware structure + * + **/ +void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw) +{ + u32 regval; + u32 i; + + DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2"); + + /* Enable relaxed ordering */ + for (i = 0; i < hw->mac.max_tx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); + regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); + } + + for (i = 0; i < hw->mac.max_rx_queues; i++) { + regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); + regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN | + IXGBE_DCA_RXCTRL_DESC_HSRO_EN); + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + +} + +/** + * ixgbe_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * Calculates the checksum for some buffer over a specified length. The + * checksum calculated is returned. + **/ +static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("ixgbe_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * ixgbe_host_interface_command - Issue command to manageability block + * @hw: pointer to the HW structure + * @buffer: contains the command to write and where the return status will + * be placed + * @length: length of buffer, must be a multiple of 4 bytes + * + * Communicates with the manageability block. On success returns IXGBE_SUCCESS, + * else returns IXGBE_ERR_HOST_INTERFACE_COMMAND. + **/ +static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer, + u32 length) +{ + u32 hicr, i; + u32 hdr_size = sizeof(struct ixgbe_hic_hdr); + u8 buf_len, dword_len; + + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_host_interface_command"); + + if (length == 0 || length & 0x3 || + length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + /* Check that the host interface is enabled. */ + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if ((hicr & IXGBE_HICR_EN) == 0) { + DEBUGOUT("IXGBE_HOST_EN bit disabled.\n"); + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + /* Calculate length in DWORDs */ + dword_len = length >> 2; + + /* + * The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < dword_len; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, + i, *((u32 *)buffer + i)); + + /* Setting this bit tells the ARC that a new command is pending. */ + IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); + + for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) { + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_C)) + break; + msec_delay(1); + } + + /* Check command successful completion.
*/ + if (i == IXGBE_HI_COMMAND_TIMEOUT || + (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { + DEBUGOUT("Command has failed with no status valid.\n"); + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (i = 0; i < dword_len; i++) + *((u32 *)buffer + i) = + IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i); + + /* If there is any thing in data position pull it in */ + buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; + if (buf_len == 0) + goto out; + + if (length < (buf_len + hdr_size)) { + DEBUGOUT("Buffer not large enough for reply message.\n"); + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto out; + } + + /* Calculate length in DWORDs, add one for odd lengths */ + dword_len = (buf_len + 1) >> 2; + + /* Pull in the rest of the buffer (i is where we left off)*/ + for (; i < buf_len; i++) + *((u32 *)buffer + i) = + IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i); + +out: + return ret_val; +} + +/** + * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware + * @hw: pointer to the HW structure + * @maj: driver version major number + * @min: driver version minor number + * @build: driver version build number + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability + * block. On success return IXGBE_SUCCESS + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 sub) +{ + struct ixgbe_hic_drv_info fw_cmd; + int i; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) + != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto out; + } + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; + fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; + fw_cmd.port_num = (u8)hw->bus.func; + fw_cmd.ver_maj = maj; + fw_cmd.ver_min = min; + fw_cmd.ver_build = build; + fw_cmd.ver_sub = sub; + fw_cmd.hdr.checksum = 0; + fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, + (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); + fw_cmd.pad = 0; + fw_cmd.pad2 = 0; + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd, + sizeof(fw_cmd)); + if (ret_val != IXGBE_SUCCESS) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) + ret_val = IXGBE_SUCCESS; + else + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); +out: + return ret_val; +} + +/** + * ixgbe_set_rxpba_generic - Initialize Rx packet buffer + * @hw: pointer to hardware structure + * @num_pb: number of packet buffers to allocate + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy) +{ + u32 pbsize = hw->mac.rx_pb_size; + int i = 0; + u32 rxpktsize, txpktsize, txpbthresh; + + /* Reserve headroom */ + pbsize -= headroom; + + if (!num_pb) + num_pb = 1; + + /* Divide remaining packet buffer space amongst the number of packet + * buffers requested using supplied strategy. 
+ */ + switch (strategy) { + case (PBA_STRATEGY_WEIGHTED): + /* pba_80_48 strategy weight first half of packet buffer with + * 5/8 of the packet buffer space. + */ + rxpktsize = (pbsize * 5 * 2) / (num_pb * 8); + pbsize -= rxpktsize * (num_pb / 2); + rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; + for (; i < (num_pb / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + /* Fall through to configure remaining packet buffers */ + case (PBA_STRATEGY_EQUAL): + rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; + for (; i < num_pb; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); + break; + default: + break; + } + + /* Only support an equally distributed Tx packet buffer strategy. */ + txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; + txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < num_pb; i++) { + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); + } + + /* Clear unused TCs, if any, to zero buffer size*/ + for (; i < IXGBE_MAX_PB; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); + } +} + +/** + * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo + * @hw: pointer to the hardware structure + * + * The 82599 and x540 MACs can experience issues if TX work is still pending + * when a reset occurs. This function prevents this by flushing the PCIe + * buffers on the system. + **/ +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) +{ + u32 gcr_ext, hlreg0; + + /* + * If double reset is not requested then all transactions should + * already be clear and as such there is no work to do + */ + if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED)) + return; + + /* + * Set loopback enable to prevent any transmits from being sent + * should the link come up. This assumes that the RXCTRL.RXEN bit + * has already been cleared. + */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); + + /* initiate cleaning flow for buffers in the PCIe transaction layer */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, + gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR); + + /* Flush all writes and allow 20usec for all transactions to clear */ + IXGBE_WRITE_FLUSH(hw); + usec_delay(20); + + /* restore previous register values */ + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); +} diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h new file mode 100644 index 0000000000..35f1e3341a --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h @@ -0,0 +1,135 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. 
Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXGBE_COMMON_H_ +#define _IXGBE_COMMON_H_ + +#include "ixgbe_type.h" +#define IXGBE_WRITE_REG64(hw, reg, value) \ + do { \ + IXGBE_WRITE_REG(hw, reg, (u32) value); \ + IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \ + } while (0) + +u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); + +s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw); +s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); +s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); +void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + +s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); +s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 *data); +s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val); +s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); +s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); + +s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); +s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, + ixgbe_mc_addr_itr func, bool 
clear); +s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, + u32 addr_count, ixgbe_mc_addr_itr func); +s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); + +s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num); +s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); + +s32 ixgbe_validate_mac_addr(u8 *mac_addr); +s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); +void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); +s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); + +s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); +s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); + +s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); +s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); +s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); +s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, + u32 vind, bool vlan_on); +s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); + +s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, bool link_up_wait_to_complete); + +s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); +s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); +void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, + int strategy); +void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw); +s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + u8 build, u8 ver); +void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); +#endif /* IXGBE_COMMON */ diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c new file mode 100644 index 0000000000..0fe4ca7057 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c @@ -0,0 +1,751 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "ixgbe_type.h" +#include "ixgbe_mbx.h" + +/** + * ixgbe_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_read_mbx"); + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * ixgbe_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_mbx"); + + if (size > mbx->size) + ret_val = IXGBE_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * ixgbe_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_msg"); + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_ack"); + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_rst"); + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * ixgbe_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it 
successfully received a message notification + **/ +static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("ixgbe_poll_for_msg"); + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + +out: + return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; +} + +/** + * ixgbe_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("ixgbe_poll_for_ack"); + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + +out: + return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; +} + +/** + * ixgbe_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_read_posted_mbx"); + + if (!mbx->ops.read) + goto out; + + ret_val = ixgbe_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_write_posted_mbx"); + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = ixgbe_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +/** + * ixgbe_init_mbx_ops_generic - Initialize MB function pointers + * @hw: pointer to the HW structure + * + * Sets up the mailbox read and write message function pointers + **/ +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; +} + +/** + * ixgbe_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits.
+ **/ +static u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw) +{ + u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); + + v2p_mailbox |= hw->mbx.v2p_mailbox; + hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * ixgbe_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) +{ + u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw); + s32 ret_val = IXGBE_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = IXGBE_SUCCESS; + + hw->mbx.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_msg_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_ack_vf"); + + if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns TRUE if the PF has set the reset done bit or else FALSE + **/ +static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id) +{ + s32 ret_val = IXGBE_ERR_MBX; + + UNREFERENCED_1PARAMETER(mbx_id); + DEBUGFUNC("ixgbe_check_for_rst_vf"); + + if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | + IXGBE_VFMAILBOX_RSTI))) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_obtain_mbx_lock_vf"); + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) + ret_val = IXGBE_SUCCESS; + + return ret_val; +} + +/** + * ixgbe_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val; + u16 i; + + UNREFERENCED_1PARAMETER(mbx_id); + + DEBUGFUNC("ixgbe_write_mbx_vf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_write; + + 
/* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_vf(hw, 0); + ixgbe_check_for_ack_vf(hw, 0); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); + +out_no_write: + return ret_val; +} + +/** + * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfuly read message from buffer + **/ +static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) +{ + s32 ret_val = IXGBE_SUCCESS; + u16 i; + + DEBUGFUNC("ixgbe_read_mbx_vf"); + UNREFERENCED_1PARAMETER(mbx_id); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_vf(hw); + if (ret_val) + goto out_no_read; + + /* copy the message from the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); + + /* Acknowledge receipt and release mailbox, then we're done */ + IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbe_init_mbx_params_vf - set initial values for vf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for vf mailbox + */ +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + /* start mailbox as timed out and let the reset_hw call set the timeout + * value to begin communications */ + mbx->timeout = 0; + mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->ops.read = ixgbe_read_mbx_vf; + mbx->ops.write = ixgbe_write_mbx_vf; + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; + mbx->ops.check_for_msg = ixgbe_check_for_msg_vf; + mbx->ops.check_for_ack = ixgbe_check_for_ack_vf; + mbx->ops.check_for_rst = ixgbe_check_for_rst_vf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} + +static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) +{ + u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); + s32 ret_val = IXGBE_ERR_MBX; + + if (mbvficr & mask) { + ret_val = IXGBE_SUCCESS; + IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); + } + + return ret_val; +} + +/** + * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + DEBUGFUNC("ixgbe_check_for_msg_pf"); + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + 
**/ +static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + + DEBUGFUNC("ixgbe_check_for_ack_pf"); + + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { + ret_val = IXGBE_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * ixgbe_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + u32 reg_offset = (vf_number < 32) ? 0 : 1; + u32 vf_shift = vf_number % 32; + u32 vflre = 0; + s32 ret_val = IXGBE_ERR_MBX; + + DEBUGFUNC("ixgbe_check_for_rst_pf"); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); + break; + case ixgbe_mac_X540: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); + break; + default: + break; + } + + if (vflre & (1 << vf_shift)) { + ret_val = IXGBE_SUCCESS; + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) +{ + s32 ret_val = IXGBE_ERR_MBX; + u32 p2v_mailbox; + + DEBUGFUNC("ixgbe_obtain_mbx_lock_pf"); + + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); + if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) + ret_val = IXGBE_SUCCESS; + + return ret_val; +} + +/** + * ixgbe_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("ixgbe_write_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + ixgbe_check_for_msg_pf(hw, vf_number); + ixgbe_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * ixgbe_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
+ **/ +static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("ixgbe_read_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * ixgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (hw->mac.type != ixgbe_mac_82599EB && + hw->mac.type != ixgbe_mac_X540) + return; + + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = IXGBE_VFMAILBOX_SIZE; + + mbx->ops.read = ixgbe_read_mbx_pf; + mbx->ops.write = ixgbe_write_mbx_pf; + mbx->ops.read_posted = ixgbe_read_posted_mbx; + mbx->ops.write_posted = ixgbe_write_posted_mbx; + mbx->ops.check_for_msg = ixgbe_check_for_msg_pf; + mbx->ops.check_for_ack = ixgbe_check_for_ack_pf; + mbx->ops.check_for_rst = ixgbe_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +} diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h new file mode 100644 index 0000000000..398d0a3c22 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h @@ -0,0 +1,112 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXGBE_MBX_H_ +#define _IXGBE_MBX_H_ + +#include "ixgbe_type.h" + +#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +#define IXGBE_ERR_MBX -100 + +#define IXGBE_VFMAILBOX 0x002FC +#define IXGBE_VFMBMEM 0x00200 + +/* Define mailbox register bits */ +#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ +#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ +#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ +#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ +#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ +#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ +#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ + +#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + + +/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is TRUE if it is IXGBE_PF_*. 
+ * Message ACK's are the value or'd with 0xF0000000 + */ +#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with + * this are the ACK */ +#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with + * this are the NACK */ +#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still + clear to send requests */ +#define IXGBE_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) + +#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ +#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */ + +/* length of permanent address message returned from PF */ +#define IXGBE_VF_PERMADDR_MSG_LEN 4 +/* word in permanent address message with the current multicast type */ +#define IXGBE_VF_MC_TYPE_WORD 3 + +#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + + +#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); +s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); +s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); +void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); +void ixgbe_init_mbx_params_vf(struct ixgbe_hw *); +void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); + +#endif /* _IXGBE_MBX_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h new file mode 100644 index 0000000000..fe7ac498dd --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h @@ -0,0 +1,145 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXGBE_OS_H_ +#define _IXGBE_OS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../ixgbe_logs.h" + +/* Remove some compiler warnings for the files in this dir */ +#ifdef __INTEL_COMPILER +#pragma warning(disable:2259) /* Conversion may lose significant bits */ +#pragma warning(disable:869) /* Parameter was never referenced */ +#pragma warning(disable:181) /* Arg incompatible with format string */ +#pragma warning(disable:1419) /* External declaration in primary source file */ +#pragma warning(disable:111) /* Statement is unreachable */ +#pragma warning(disable:981) /* Operands are evaluated in unspecified order */ +#else +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wformat" +#pragma GCC diagnostic ignored "-Wuninitialized" +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 7)) +#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" +#endif +#endif + +#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x") + +#define DELAY(x) rte_delay_us(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000*(x)) + +#define DEBUGFUNC(F) DEBUGOUT(F); +#define DEBUGOUT(S, args...) PMD_DRV_LOG(DEBUG, S, ##args) +#define DEBUGOUT1(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT2(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT3(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT6(S, args...) DEBUGOUT(S, ##args) +#define DEBUGOUT7(S, args...) DEBUGOUT(S, ##args) + +#define FALSE 0 +#define TRUE 1 + +/* Bunch of defines for shared code bogosity */ +#define UNREFERENCED_PARAMETER(_p) +#define UNREFERENCED_1PARAMETER(_p) +#define UNREFERENCED_2PARAMETER(_p, _q) +#define UNREFERENCED_3PARAMETER(_p, _q, _r) +#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) + + +#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i) +#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i) + +typedef uint8_t u8; +typedef int8_t s8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef int32_t s32; +typedef uint64_t u64; +typedef int bool; + +#define mb() rte_mb() +#define wmb() rte_wmb() +#define rmb() rte_rmb() + +#define prefetch(x) rte_prefetch0(x) + +#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg))) + +static inline uint32_t ixgbe_read_addr(volatile void* addr) +{ + return IXGBE_PCI_REG(addr); +} + +#define IXGBE_PCI_REG_WRITE(reg, value) do { \ + IXGBE_PCI_REG((reg)) = (value); \ +} while(0) + +#define IXGBE_PCI_REG_ADDR(hw, reg) \ + ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg))) + +#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \ + IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2)) + +/* Not implemented !! 
*/ +#define IXGBE_READ_PCIE_WORD(hw, reg) 0 +#define IXGBE_WRITE_PCIE_WORD(hw, reg, value) do { } while(0) + +#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) + +#define IXGBE_READ_REG(hw, reg) \ + ixgbe_read_addr(IXGBE_PCI_REG_ADDR((hw), (reg))) + +#define IXGBE_WRITE_REG(hw, reg, value) \ + IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ADDR((hw), (reg)), (value)) + +#define IXGBE_READ_REG_ARRAY(hw, reg, index) \ + IXGBE_PCI_REG(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index))) + +#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \ + IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value)) + +#endif /* _IXGBE_OS_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c new file mode 100644 index 0000000000..56565cdec8 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c @@ -0,0 +1,1843 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +static void ixgbe_i2c_start(struct ixgbe_hw *hw); +static void ixgbe_i2c_stop(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +static bool ixgbe_get_i2c_data(u32 *i2cctl); +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); + +/** + * ixgbe_init_phy_ops_generic - Inits PHY function ptrs + * @hw: pointer to the hardware structure + * + * Initialize the function pointers. + **/ +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + + DEBUGFUNC("ixgbe_init_phy_ops_generic"); + + /* PHY */ + phy->ops.identify = &ixgbe_identify_phy_generic; + phy->ops.reset = &ixgbe_reset_phy_generic; + phy->ops.read_reg = &ixgbe_read_phy_reg_generic; + phy->ops.write_reg = &ixgbe_write_phy_reg_generic; + phy->ops.setup_link = &ixgbe_setup_phy_link_generic; + phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic; + phy->ops.check_link = NULL; + phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; + phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic; + phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic; + phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic; + phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic; + phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear; + phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic; + phy->sfp_type = ixgbe_sfp_type_unknown; + phy->ops.check_overtemp = &ixgbe_tn_check_overtemp; + return IXGBE_SUCCESS; +} + +/** + * ixgbe_identify_phy_generic - Get physical layer module + * @hw: pointer to hardware structure + * + * Determines the physical layer module found on the current adapter. 
+ **/ +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 phy_addr; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_identify_phy_generic"); + + if (hw->phy.type == ixgbe_phy_unknown) { + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { + if (ixgbe_validate_phy_addr(hw, phy_addr)) { + hw->phy.addr = phy_addr; + ixgbe_get_phy_id(hw); + hw->phy.type = + ixgbe_get_phy_type_from_id(hw->phy.id); + + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.ops.read_reg(hw, + IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &ext_ability); + if (ext_ability & + (IXGBE_MDIO_PHY_10GBASET_ABILITY | + IXGBE_MDIO_PHY_1000BASET_ABILITY)) + hw->phy.type = + ixgbe_phy_cu_unknown; + else + hw->phy.type = + ixgbe_phy_generic; + } + + status = IXGBE_SUCCESS; + break; + } + } + /* clear value if nothing found */ + if (status != IXGBE_SUCCESS) + hw->phy.addr = 0; + } else { + status = IXGBE_SUCCESS; + } + + return status; +} + +/** + * ixgbe_validate_phy_addr - Determines phy address is valid + * @hw: pointer to hardware structure + * + **/ +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) +{ + u16 phy_id = 0; + bool valid = FALSE; + + DEBUGFUNC("ixgbe_validate_phy_addr"); + + hw->phy.addr = phy_addr; + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); + + if (phy_id != 0xFFFF && phy_id != 0x0) + valid = TRUE; + + return valid; +} + +/** + * ixgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) +{ + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + + DEBUGFUNC("ixgbe_get_phy_id"); + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_high); + + if (status == IXGBE_SUCCESS) { + hw->phy.id = (u32)(phy_id_high << 16); + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); + hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); + } + return status; +} + +/** + * ixgbe_get_phy_type_from_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) +{ + enum ixgbe_phy_type phy_type; + + DEBUGFUNC("ixgbe_get_phy_type_from_id"); + + switch (phy_id) { + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; + case QT2022_PHY_ID: + phy_type = ixgbe_phy_qt; + break; + case ATH_PHY_ID: + phy_type = ixgbe_phy_nl; + break; + default: + phy_type = ixgbe_phy_unknown; + break; + } + + DEBUGOUT1("phy type found is %d\n", phy_type); + return phy_type; +} + +/** + * ixgbe_reset_phy_generic - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) +{ + u32 i; + u16 ctrl = 0; + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_reset_phy_generic"); + + if (hw->phy.type == ixgbe_phy_unknown) + status = ixgbe_identify_phy_generic(hw); + + if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none) + goto out; + + /* Don't reset PHY if it's shut down due to overtemp. */ + if (!hw->phy.reset_if_overtemp && + (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw))) + goto out; + + /* + * Perform soft PHY reset to the PHY_XS. 
+ * This will cause a soft reset to the PHY + */ + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + IXGBE_MDIO_PHY_XS_RESET); + + /* + * Poll for reset bit to self-clear indicating reset is complete. + * Some PHYs could take up to 3 seconds to complete and need about + * 1.7 usec delay after the reset is complete. + */ + for (i = 0; i < 30; i++) { + msec_delay(100); + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl); + if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) { + usec_delay(2); + break; + } + } + + if (ctrl & IXGBE_MDIO_PHY_XS_RESET) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("PHY reset polling failed to complete.\n"); + } + +out: + return status; +} + +/** + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + **/ +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + u32 command; + u32 i; + u32 data; + s32 status = IXGBE_SUCCESS; + u16 gssr; + + DEBUGFUNC("ixgbe_read_phy_reg_generic"); + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == IXGBE_SUCCESS) { + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + DEBUGOUT("PHY address command did not complete.\n"); + status = IXGBE_ERR_PHY; + } + + if (status == IXGBE_SUCCESS) { + /* + * Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + DEBUGOUT("PHY read command didn't complete\n"); + status = IXGBE_ERR_PHY; + } else { + /* + * Read operation is complete. 
Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + } + } + + hw->mac.ops.release_swfw_sync(hw, gssr); + } + + return status; +} + +/** + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + **/ +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + u32 command; + u32 i; + s32 status = IXGBE_SUCCESS; + u16 gssr; + + DEBUGFUNC("ixgbe_write_phy_reg_generic"); + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) + status = IXGBE_ERR_SWFW_SYNC; + + if (status == IXGBE_SUCCESS) { + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + DEBUGOUT("PHY address cmd didn't complete\n"); + status = IXGBE_ERR_PHY; + } + + if (status == IXGBE_SUCCESS) { + /* + * Address cycle complete, setup and write the write + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* + * Check every 10 usec to see if the address cycle + * completed. The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) + break; + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { + DEBUGOUT("PHY address cmd didn't complete\n"); + status = IXGBE_ERR_PHY; + } + } + + hw->mac.ops.release_swfw_sync(hw, gssr); + } + + return status; +} + +/** + * ixgbe_setup_phy_link_generic - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. 
+ **/ +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 time_out; + u32 max_time_out = 10; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = FALSE; + ixgbe_link_speed speed; + + DEBUGFUNC("ixgbe_setup_phy_link_generic"); + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, + IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE | + IXGBE_MII_100BASE_T_ADVERTISE_HALF); + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + + autoneg_reg |= IXGBE_MII_RESTART; + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + /* Wait for autonegotiation to finish */ + for (time_out = 0; time_out < max_time_out; time_out++) { + usec_delay(10); + /* Restart PHY autonegotiation and wait for completion */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; + if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) { + break; + } + } + + if (time_out == max_time_out) { + status = IXGBE_ERR_LINK_SETUP; + DEBUGOUT("ixgbe_setup_phy_link_generic: time out"); + } + + return status; +} + +/** + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + **/ +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete) +{ + UNREFERENCED_2PARAMETER(autoneg, autoneg_wait_to_complete); + + DEBUGFUNC("ixgbe_setup_phy_link_speed_generic"); + + /* + * Clear autoneg_advertised and set new values based on input link + * speed. 
+ */ + hw->phy.autoneg_advertised = 0; + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + + /* Setup link based on the new speed settings */ + hw->phy.ops.setup_link(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: boolean auto-negotiation value + * + * Determines the link capabilities by reading the AUTOC register. + **/ +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + s32 status = IXGBE_ERR_LINK_SETUP; + u16 speed_ability; + + DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic"); + + *speed = 0; + *autoneg = TRUE; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &speed_ability); + + if (status == IXGBE_SUCCESS) { + if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) + *speed |= IXGBE_LINK_SPEED_10GB_FULL; + if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) + *speed |= IXGBE_LINK_SPEED_1GB_FULL; + if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M) + *speed |= IXGBE_LINK_SPEED_100_FULL; + } + + return status; +} + +/** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * + * Reads the VS1 register to determine if link is up and the current speed for + * the PHY. + **/ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) +{ + s32 status = IXGBE_SUCCESS; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + + DEBUGFUNC("ixgbe_check_phy_link_tnx"); + + /* Initialize speed and link to default case */ + *link_up = FALSE; + *speed = IXGBE_LINK_SPEED_10GB_FULL; + + /* + * Check current speed and link status of the PHY register. + * This is a vendor specific register and may have to + * be changed for other copper PHYs. + */ + for (time_out = 0; time_out < max_time_out; time_out++) { + usec_delay(10); + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &phy_data); + phy_link = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = TRUE; + if (phy_speed == + IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + } + } + + return status; +} + +/** + * ixgbe_setup_phy_link_tnx - Set and restart autoneg + * @hw: pointer to hardware structure + * + * Restart autonegotiation and PHY and waits for completion. 
+ **/ +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 time_out; + u32 max_time_out = 10; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = FALSE; + ixgbe_link_speed speed; + + DEBUGFUNC("ixgbe_setup_phy_link_tnx"); + + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) + autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + /* Restart PHY autonegotiation and wait for completion */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + + autoneg_reg |= IXGBE_MII_RESTART; + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + /* Wait for autonegotiation to finish */ + for (time_out = 0; time_out < max_time_out; time_out++) { + usec_delay(10); + /* Restart PHY autonegotiation and wait for completion */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; + if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) { + break; + } + } + + if (time_out == max_time_out) { + status = IXGBE_ERR_LINK_SETUP; + DEBUGOUT("ixgbe_setup_phy_link_tnx: time out"); + } + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx"); + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version + * @hw: pointer to hardware structure + * @firmware_version: pointer to the PHY Firmware Version + **/ +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_get_phy_firmware_version_generic"); + + status = 
hw->phy.ops.read_reg(hw, AQ_FW_REV, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +} + +/** + * ixgbe_reset_phy_nl - Performs a PHY reset + * @hw: pointer to hardware structure + **/ +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) +{ + u16 phy_offset, control, eword, edata, block_crc; + bool end_data = FALSE; + u16 list_offset, data_offset; + u16 phy_data = 0; + s32 ret_val = IXGBE_SUCCESS; + u32 i; + + DEBUGFUNC("ixgbe_reset_phy_nl"); + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + + /* reset the PHY and poll for completion */ + hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, + (phy_data | IXGBE_MDIO_PHY_XS_RESET)); + + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, + IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) + break; + msec_delay(10); + } + + if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { + DEBUGOUT("PHY reset did not complete.\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + + /* Get init offsets */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); + if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); + data_offset++; + while (!end_data) { + /* + * Read control word from PHY init contents offset + */ + ret_val = hw->eeprom.ops.read(hw, data_offset, &eword); + control = (eword & IXGBE_CONTROL_MASK_NL) >> + IXGBE_CONTROL_SHIFT_NL; + edata = eword & IXGBE_DATA_MASK_NL; + switch (control) { + case IXGBE_DELAY_NL: + data_offset++; + DEBUGOUT1("DELAY: %d MS\n", edata); + msec_delay(edata); + break; + case IXGBE_DATA_NL: + DEBUGOUT("DATA: \n"); + data_offset++; + hw->eeprom.ops.read(hw, data_offset++, + &phy_offset); + for (i = 0; i < edata; i++) { + hw->eeprom.ops.read(hw, data_offset, &eword); + hw->phy.ops.write_reg(hw, phy_offset, + IXGBE_TWINAX_DEV, eword); + DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword, + phy_offset); + data_offset++; + phy_offset++; + } + break; + case IXGBE_CONTROL_NL: + data_offset++; + DEBUGOUT("CONTROL: \n"); + if (edata == IXGBE_CONTROL_EOL_NL) { + DEBUGOUT("EOL\n"); + end_data = TRUE; + } else if (edata == IXGBE_CONTROL_SOL_NL) { + DEBUGOUT("SOL\n"); + } else { + DEBUGOUT("Bad control value\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + break; + default: + DEBUGOUT("Bad control type\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + } + +out: + return ret_val; +} + +/** + * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * @hw: pointer to hardware structure + * + * Searches for and identifies the SFP module and assigns appropriate PHY type. 
+ **/ +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; + u8 identifier = 0; + u8 comp_codes_1g = 0; + u8 comp_codes_10g = 0; + u8 oui_bytes[3] = {0, 0, 0}; + u8 cable_tech = 0; + u8 cable_spec = 0; + u16 enforce_sfp = 0; + + DEBUGFUNC("ixgbe_identify_sfp_module_generic"); + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_IDENTIFIER, + &identifier); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + + if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + /* ID Module + * ========= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CORE0 - 82599-specific + * 4 SFP_DA_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + * 7 SFP_act_lmt_DA_CORE0 - 82599-specific + * 8 SFP_act_lmt_DA_CORE1 - 82599-specific + * 9 SFP_1g_cu_CORE0 - 82599-specific + * 10 SFP_1g_cu_CORE1 - 82599-specific + */ + if (hw->mac.type == ixgbe_mac_82598EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.sfp_type = ixgbe_sfp_type_da_cu; + else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_sr; + else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) + hw->phy.sfp_type = ixgbe_sfp_type_lr; + else + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } else if (hw->mac.type == ixgbe_mac_82599EB) { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_cu_core1; + } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) { + hw->phy.ops.read_i2c_eeprom( + hw, IXGBE_SFF_CABLE_SPEC_COMP, + &cable_spec); + if (cable_spec & + IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_da_act_lmt_core1; + } else { + hw->phy.sfp_type = + ixgbe_sfp_type_unknown; + } + } else if (comp_codes_10g & + (IXGBE_SFF_10GBASESR_CAPABLE | + IXGBE_SFF_10GBASELR_CAPABLE)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_srlr_core1; + } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_cu_core1; + 
} else { + hw->phy.sfp_type = ixgbe_sfp_type_unknown; + } + } + + if (hw->phy.sfp_type != stored_sfp_type) + hw->phy.sfp_setup_needed = TRUE; + + /* Determine if the SFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = FALSE; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || + ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && + (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = TRUE; + + /* Determine PHY vendor */ + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = identifier; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + + if (status == IXGBE_ERR_SWFW_SYNC || + status == IXGBE_ERR_I2C || + status == IXGBE_ERR_SFP_NOT_PRESENT) + goto err_read_i2c_eeprom; + + vendor_oui = + ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | + (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | + (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + switch (vendor_oui) { + case IXGBE_SFF_VENDOR_OUI_TYCO: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_tyco; + break; + case IXGBE_SFF_VENDOR_OUI_FTL: + if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = ixgbe_phy_sfp_ftl_active; + else + hw->phy.type = ixgbe_phy_sfp_ftl; + break; + case IXGBE_SFF_VENDOR_OUI_AVAGO: + hw->phy.type = ixgbe_phy_sfp_avago; + break; + case IXGBE_SFF_VENDOR_OUI_INTEL: + hw->phy.type = ixgbe_phy_sfp_intel; + break; + default: + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_passive_unknown; + else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) + hw->phy.type = + ixgbe_phy_sfp_active_unknown; + else + hw->phy.type = ixgbe_phy_sfp_unknown; + break; + } + } + + /* Allow any DA cable vendor */ + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | + IXGBE_SFF_DA_ACTIVE_CABLE)) { + status = IXGBE_SUCCESS; + goto out; + } + + /* Verify supported 1G SFP modules */ + if (comp_codes_10g == 0 && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } + + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) { + status = IXGBE_SUCCESS; + goto out; + } + + ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || + (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_sfp_intel) { + status = IXGBE_SUCCESS; + } else { + DEBUGOUT("SFP+ module not supported\n"); + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } else { + status = IXGBE_SUCCESS; + } + } + +out: + return status; + +err_read_i2c_eeprom: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + if (hw->phy.type != ixgbe_phy_nl) { + hw->phy.id = 0; + hw->phy.type = ixgbe_phy_unknown; + } + return IXGBE_ERR_SFP_NOT_PRESENT; +} + +/** + * 
ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence + * @hw: pointer to hardware structure + * @list_offset: offset to the SFP ID list + * @data_offset: offset to the SFP data block + * + * Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if + * so it returns the offsets to the phy init sequence block. + **/ +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset) +{ + u16 sfp_id; + u16 sfp_type = hw->phy.sfp_type; + + DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets"); + + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) + return IXGBE_ERR_SFP_NOT_PRESENT; + + if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) && + (hw->phy.sfp_type == ixgbe_sfp_type_da_cu)) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + + /* + * Limiting active cables and 1G Phys must be initialized as + * SR modules + */ + if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || + sfp_type == ixgbe_sfp_type_1g_cu_core0) + sfp_type = ixgbe_sfp_type_srlr_core0; + else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || + sfp_type == ixgbe_sfp_type_1g_cu_core1) + sfp_type = ixgbe_sfp_type_srlr_core1; + + /* Read offset to PHY init contents */ + hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset); + + if ((!*list_offset) || (*list_offset == 0xFFFF)) + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + + /* Shift offset to first ID word */ + (*list_offset)++; + + /* + * Find the matching SFP ID in the EEPROM + * and program the init sequence + */ + hw->eeprom.ops.read(hw, *list_offset, &sfp_id); + + while (sfp_id != IXGBE_PHY_INIT_END_NL) { + if (sfp_id == sfp_type) { + (*list_offset)++; + hw->eeprom.ops.read(hw, *list_offset, data_offset); + if ((!*data_offset) || (*data_offset == 0xFFFF)) { + DEBUGOUT("SFP+ module not supported\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + break; + } + } else { + (*list_offset) += 2; + if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id)) + return IXGBE_ERR_PHY; + } + } + + if (sfp_id == IXGBE_PHY_INIT_END_NL) { + DEBUGOUT("No matching SFP+ module found\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to read + * @eeprom_data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface. + **/ +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) +{ + DEBUGFUNC("ixgbe_read_i2c_eeprom_generic"); + + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface + * @hw: pointer to hardware structure + * @byte_offset: EEPROM byte offset to write + * @eeprom_data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface. 
+ **/ +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data) +{ + DEBUGFUNC("ixgbe_write_i2c_eeprom_generic"); + + return hw->phy.ops.write_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +} + +/** + * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status = IXGBE_SUCCESS; + u32 max_retry = 10; + u32 retry = 0; + u16 swfw_mask = 0; + bool nack = 1; + *data = 0; + + DEBUGFUNC("ixgbe_read_i2c_byte_generic"); + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) + != IXGBE_SUCCESS) { + status = IXGBE_ERR_SWFW_SYNC; + goto read_byte_out; + } + + ixgbe_i2c_start(hw); + + /* Device Address and write indication */ + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_start(hw); + + /* Device Address and read indication */ + status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_in_i2c_byte(hw, data); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_bit(hw, nack); + if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_stop(hw); + break; + +fail: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: + return status; +} + +/** + * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ **/ +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = IXGBE_SUCCESS; + u32 max_retry = 1; + u32 retry = 0; + u16 swfw_mask = 0; + + DEBUGFUNC("ixgbe_write_i2c_byte_generic"); + + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + swfw_mask = IXGBE_GSSR_PHY1_SM; + else + swfw_mask = IXGBE_GSSR_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) { + status = IXGBE_ERR_SWFW_SYNC; + goto write_byte_out; + } + + do { + ixgbe_i2c_start(hw); + + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, data); + if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); + if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_stop(hw); + break; + +fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: + return status; +} + +/** + * ixgbe_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +static void ixgbe_i2c_start(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + + DEBUGFUNC("ixgbe_i2c_start"); + + /* Start condition must begin with data and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 1); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(IXGBE_I2C_T_SU_STA); + + ixgbe_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(IXGBE_I2C_T_HD_STA); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IXGBE_I2C_T_LOW); + +} + +/** + * ixgbe_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +static void ixgbe_i2c_stop(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + + DEBUGFUNC("ixgbe_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 0); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(IXGBE_I2C_T_SU_STO); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(IXGBE_I2C_T_BUF); +} + +/** + * ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("ixgbe_clock_in_i2c_byte"); + + for (i = 7; i >= 0; i--) { + ixgbe_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) +{ + s32 status = 
IXGBE_SUCCESS; + s32 i; + u32 i2cctl; + bool bit = 0; + + DEBUGFUNC("ixgbe_clock_out_i2c_byte"); + + for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = ixgbe_clock_out_i2c_bit(hw, bit); + + if (status != IXGBE_SUCCESS) + break; + } + + /* Release SDA line (set high) */ + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + i2cctl |= IXGBE_I2C_DATA_OUT; + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); + IXGBE_WRITE_FLUSH(hw); + + return status; +} + +/** + * ixgbe_get_i2c_ack - Polls for I2C ACK + * @hw: pointer to hardware structure + * + * Clocks in/out one bit via I2C data/clock + **/ +static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 i = 0; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 timeout = 10; + bool ack = 1; + + DEBUGFUNC("ixgbe_get_i2c_ack"); + + ixgbe_raise_i2c_clk(hw, &i2cctl); + + + /* Minimum high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + /* Poll for ACK. Note that ACK in I2C spec is + * transition from 1 to 0 */ + for (i = 0; i < timeout; i++) { + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + ack = ixgbe_get_i2c_data(&i2cctl); + + usec_delay(1); + if (ack == 0) + break; + } + + if (ack == 1) { + DEBUGOUT("I2C ack was not received.\n"); + status = IXGBE_ERR_I2C; + } + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IXGBE_I2C_T_LOW); + + return status; +} + +/** + * ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: read data value + * + * Clocks in one bit via I2C data/clock + **/ +static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + + DEBUGFUNC("ixgbe_clock_in_i2c_bit"); + + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + *data = ixgbe_get_i2c_data(&i2cctl); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(IXGBE_I2C_T_LOW); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock + * @hw: pointer to hardware structure + * @data: data value to write + * + * Clocks out one bit via I2C data/clock + **/ +static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) +{ + s32 status; + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + + DEBUGFUNC("ixgbe_clock_out_i2c_bit"); + + status = ixgbe_set_i2c_data(hw, &i2cctl, data); + if (status == IXGBE_SUCCESS) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. 
+ */ + usec_delay(IXGBE_I2C_T_LOW); + } else { + status = IXGBE_ERR_I2C; + DEBUGOUT1("I2C data was not set to %X\n", data); + } + + return status; +} +/** + * ixgbe_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' + **/ +static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + DEBUGFUNC("ixgbe_raise_i2c_clk"); + + *i2cctl |= IXGBE_I2C_CLK_OUT; + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* SCL rise time (1000ns) */ + usec_delay(IXGBE_I2C_T_RISE); +} + +/** + * ixgbe_lower_i2c_clk - Lowers the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' + **/ +static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) +{ + + DEBUGFUNC("ixgbe_lower_i2c_clk"); + + *i2cctl &= ~IXGBE_I2C_CLK_OUT; + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ + usec_delay(IXGBE_I2C_T_FALL); +} + +/** + * ixgbe_set_i2c_data - Sets the I2C data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit + **/ +static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_set_i2c_data"); + + if (data) + *i2cctl |= IXGBE_I2C_DATA_OUT; + else + *i2cctl &= ~IXGBE_I2C_DATA_OUT; + + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ + usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); + + /* Verify data was set correctly */ + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + if (data != ixgbe_get_i2c_data(i2cctl)) { + status = IXGBE_ERR_I2C; + DEBUGOUT1("Error - I2C data was not set to %X.\n", data); + } + + return status; +} + +/** + * ixgbe_get_i2c_data - Reads the I2C SDA data bit + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value + **/ +static bool ixgbe_get_i2c_data(u32 *i2cctl) +{ + bool data; + + DEBUGFUNC("ixgbe_get_i2c_data"); + + if (*i2cctl & IXGBE_I2C_DATA_IN) + data = 1; + else + data = 0; + + return data; +} + +/** + * ixgbe_i2c_bus_clear - Clears the I2C bus + * @hw: pointer to hardware structure + * + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. + **/ +void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) +{ + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); + u32 i; + + DEBUGFUNC("ixgbe_i2c_bus_clear"); + + ixgbe_i2c_start(hw); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + for (i = 0; i < 9; i++) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ + usec_delay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ + usec_delay(IXGBE_I2C_T_LOW); + } + + ixgbe_i2c_start(hw); + + /* Put the i2c bus back to default state */ + ixgbe_i2c_stop(hw); +} + +/** + * ixgbe_tn_check_overtemp - Checks if an overtemp occured. 
+ * @hw: pointer to hardware structure + * + * Checks if the LASI temp alarm status was triggered due to overtemp + **/ +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u16 phy_data = 0; + + DEBUGFUNC("ixgbe_tn_check_overtemp"); + + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) + goto out; + + /* Check that the LASI temp alarm status was triggered */ + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data); + + if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) + goto out; + + status = IXGBE_ERR_OVERTEMP; +out: + return status; +} diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h new file mode 100644 index 0000000000..5c5dfa6d6d --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h @@ -0,0 +1,141 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXGBE_PHY_H_ +#define _IXGBE_PHY_H_ + +#include "ixgbe_type.h" +#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 + +/* EEPROM byte offsets */ +#define IXGBE_SFF_IDENTIFIER 0x0 +#define IXGBE_SFF_IDENTIFIER_SFP 0x3 +#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 +#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 +#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 +#define IXGBE_SFF_1GBE_COMP_CODES 0x6 +#define IXGBE_SFF_10GBE_COMP_CODES 0x3 +#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C + +/* Bitmasks */ +#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 +#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 +#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 +#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 + +/* Flow control defines */ +#define IXGBE_TAF_SYM_PAUSE 0x400 +#define IXGBE_TAF_ASM_PAUSE 0x800 + +/* Bit-shift macros */ +#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + +/* I2C SDA and SCL timing parameters for standard mode */ +#define IXGBE_I2C_T_HD_STA 4 +#define IXGBE_I2C_T_LOW 5 +#define IXGBE_I2C_T_HIGH 4 +#define IXGBE_I2C_T_SU_STA 5 +#define IXGBE_I2C_T_HD_DATA 5 +#define IXGBE_I2C_T_SU_DATA 1 +#define IXGBE_I2C_T_RISE 1 +#define IXGBE_I2C_T_FALL 1 +#define IXGBE_I2C_T_SU_STO 4 +#define IXGBE_I2C_T_BUF 5 + +#define IXGBE_TN_LASI_STATUS_REG 0x9005 +#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 + +s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); +bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); +enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); +s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); +s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, + bool autoneg_wait_to_complete); +s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); + +/* PHY specific */ +s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up); +s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw); +s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version); +s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version); + +s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); +s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 
*data_offset); +s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); +s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); +#endif /* _IXGBE_PHY_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h new file mode 100644 index 0000000000..a3a3c5baa0 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h @@ -0,0 +1,3138 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXGBE_TYPE_H_ +#define _IXGBE_TYPE_H_ + +#include "ixgbe_osdep.h" + + +/* Vendor ID */ +#define IXGBE_INTEL_VENDOR_ID 0x8086 + +/* Device IDs */ +#define IXGBE_DEV_ID_82598 0x10B6 +#define IXGBE_DEV_ID_82598_BX 0x1508 +#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +#define IXGBE_DEV_ID_82598AT 0x10C8 +#define IXGBE_DEV_ID_82598AT2 0x150B +#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +#define IXGBE_DEV_ID_82599_KX4 0x10F7 +#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 +#define IXGBE_DEV_ID_82599_KR 0x1517 +#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +#define IXGBE_DEV_ID_82599_CX4 0x10F9 +#define IXGBE_DEV_ID_82599_SFP 0x10FB +#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A +#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +#define IXGBE_DEV_ID_82599_VF 0x10ED +#define IXGBE_DEV_ID_X540_VF 0x1515 +#define IXGBE_DEV_ID_X540T 0x1528 + +/* General Registers */ +#define IXGBE_CTRL 0x00000 +#define IXGBE_STATUS 0x00008 +#define IXGBE_CTRL_EXT 0x00018 +#define IXGBE_ESDP 0x00020 +#define IXGBE_EODSDP 0x00028 +#define IXGBE_I2CCTL 0x00028 +#define IXGBE_PHY_GPIO 0x00028 +#define IXGBE_MAC_GPIO 0x00030 +#define IXGBE_PHYINT_STATUS0 0x00100 +#define IXGBE_PHYINT_STATUS1 0x00104 +#define IXGBE_PHYINT_STATUS2 0x00108 +#define IXGBE_LEDCTL 0x00200 +#define IXGBE_FRTIMER 0x00048 +#define IXGBE_TCPTIMER 0x0004C +#define IXGBE_CORESPARE 0x00600 +#define IXGBE_EXVET 0x05078 + +/* NVM Registers */ +#define IXGBE_EEC 0x10010 +#define IXGBE_EERD 0x10014 +#define IXGBE_EEWR 0x10018 +#define IXGBE_FLA 0x1001C +#define IXGBE_EEMNGCTL 0x10110 +#define IXGBE_EEMNGDATA 0x10114 +#define IXGBE_FLMNGCTL 0x10118 +#define IXGBE_FLMNGDATA 0x1011C +#define IXGBE_FLMNGCNT 0x10120 +#define IXGBE_FLOP 0x1013C +#define IXGBE_GRC 0x10200 +#define IXGBE_SRAMREL 0x10210 +#define IXGBE_PHYDBG 0x10218 + +/* General Receive Control */ +#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ +#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ + +#define IXGBE_VPDDIAG0 0x10204 +#define IXGBE_VPDDIAG1 0x10208 + +/* I2CCTL Bit Masks */ +#define IXGBE_I2C_CLK_IN 0x00000001 +#define IXGBE_I2C_CLK_OUT 0x00000002 +#define IXGBE_I2C_DATA_IN 0x00000004 +#define IXGBE_I2C_DATA_OUT 0x00000008 +#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 + +/* Interrupt Registers */ +#define IXGBE_EICR 0x00800 +#define IXGBE_EICS 0x00808 +#define IXGBE_EIMS 0x00880 +#define IXGBE_EIMC 0x00888 +#define IXGBE_EIAC 0x00810 +#define IXGBE_EIAM 0x00890 +#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) +#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) +#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) +#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) +/* 82599 EITR is only 12 bits, with the lower 3 always zero */ +/* + * 82598 EITR is 16 bits but set the limits based on the max + * supported by all ixgbe hardware + */ +#define IXGBE_MAX_INT_RATE 488281 +#define IXGBE_MIN_INT_RATE 956 +#define IXGBE_MAX_EITR 0x00000FF8 
+#define IXGBE_MIN_EITR 8 +#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ + (0x012300 + (((_i) - 24) * 4))) +#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +#define IXGBE_EITR_LLI_MOD 0x00008000 +#define IXGBE_EITR_CNT_WDIS 0x80000000 +#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +#define IXGBE_EITRSEL 0x00894 +#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ +#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) +#define IXGBE_GPIE 0x00898 + +/* Flow Control Registers */ +#define IXGBE_FCADBUL 0x03210 +#define IXGBE_FCADBUH 0x03214 +#define IXGBE_FCAMACL 0x04328 +#define IXGBE_FCAMACH 0x0432C +#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_PFCTOP 0x03008 +#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +#define IXGBE_FCRTV 0x032A0 +#define IXGBE_FCCFG 0x03D00 +#define IXGBE_TFCS 0x0CE00 + +/* Receive DMA Registers */ +#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + ((_i - 64) * 0x40))) +#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + ((_i - 64) * 0x40))) +#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + ((_i - 64) * 0x40))) +#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + ((_i - 64) * 0x40))) +#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + ((_i - 64) * 0x40))) +#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ + (0x0D028 + ((_i - 64) * 0x40))) +#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ + (0x0D02C + ((_i - 64) * 0x40))) +#define IXGBE_RSCDBU 0x03028 +#define IXGBE_RDDCC 0x02F20 +#define IXGBE_RXMEMWRAP 0x03190 +#define IXGBE_STARCTRL 0x03024 +/* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ + (0x0D014 + ((_i - 64) * 0x40)))) +/* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + ((_i - 64) * 0x40)))) +#define IXGBE_RDRXCTL 0x02F00 +#define IXGBE_RDRXCTL_RSC_PUSH 0x80 +#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) + /* 8 of these 0x03C00 - 0x03C1C */ +#define IXGBE_RXCTRL 0x03000 +#define IXGBE_DROPEN 0x03D04 +#define IXGBE_RXPBSIZE_SHIFT 10 + +/* Receive Registers */ +#define IXGBE_RXCSUM 0x05000 +#define IXGBE_RFCTL 0x05008 +#define IXGBE_DRECCCTL 0x02F08 +#define IXGBE_DRECCCTL_DISABLE 0 +#define IXGBE_DRECCCTL2 0x02F8C + +/* Multicast Table Array - 128 entries */ +#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) +#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x0A200 + ((_i) * 8))) +#define IXGBE_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x0A204 + ((_i) * 8))) +#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) +/* Packet split receive type */ +#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ + (0x0EA00 + ((_i) * 4))) +/* array of 4096 1-bit vlan filters */ +#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) +/*array of 4096 4-bit vlan vmdq indices */ +#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +#define IXGBE_FCTRL 0x05080 +#define IXGBE_VLNCTRL 0x05088 +#define IXGBE_MCSTCTRL 0x05090 +#define IXGBE_MRQC 0x05818 +#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ +#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ +#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ +#define IXGBE_RQTC 0x0EC70 +#define IXGBE_MTQC 0x08120 +#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_VT_CTL 0x051B0 +#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ +#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ +#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ +#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ +#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) +#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) +#define IXGBE_QDE 0x2F04 +#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ +#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ +#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) +#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +#define IXGBE_RXFECCERR0 0x051B8 +#define IXGBE_LLITHRESH 0x0EC90 +#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_IMIRVP 0x05AC0 +#define IXGBE_VMD_CTL 0x0581C +#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ + +/* Flow Director registers */ +#define IXGBE_FDIRCTRL 0x0EE00 +#define IXGBE_FDIRHKEY 0x0EE68 +#define IXGBE_FDIRSKEY 0x0EE6C +#define IXGBE_FDIRDIP4M 0x0EE3C +#define IXGBE_FDIRSIP4M 0x0EE40 +#define IXGBE_FDIRTCPM 0x0EE44 +#define IXGBE_FDIRUDPM 0x0EE48 +#define IXGBE_FDIRIP6M 0x0EE74 +#define IXGBE_FDIRM 0x0EE70 + +/* Flow Director Stats registers */ +#define IXGBE_FDIRFREE 0x0EE38 +#define IXGBE_FDIRLEN 0x0EE4C +#define IXGBE_FDIRUSTAT 0x0EE50 +#define IXGBE_FDIRFSTAT 0x0EE54 +#define IXGBE_FDIRMATCH 0x0EE58 +#define IXGBE_FDIRMISS 0x0EE5C + +/* Flow Director Programming registers */ +#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ +#define IXGBE_FDIRIPSA 0x0EE18 +#define IXGBE_FDIRIPDA 0x0EE1C +#define IXGBE_FDIRPORT 0x0EE20 +#define IXGBE_FDIRVLAN 0x0EE24 +#define IXGBE_FDIRHASH 0x0EE28 +#define IXGBE_FDIRCMD 0x0EE2C 
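Before the transmit DMA registers, a brief illustration of how the indexed register macros above are meant to be consumed may help; the following is a minimal sketch and not part of the original patch. It assumes a hypothetical helper name and a caller-supplied queue_map[] table, and uses only macros already defined in this header (IXGBE_RETA, IXGBE_WRITE_REG via ixgbe_osdep.h) to spread a 128-entry RSS indirection table across the 32 RETA registers, four one-byte entries per 32-bit register.

/*
 * Illustrative sketch only: fill the RSS redirection table.
 * queue_map[] holds one target queue index per hash bucket (128 total).
 */
#include "ixgbe_type.h"

static void example_write_rss_reta(struct ixgbe_hw *hw,
                                   const u8 queue_map[128])
{
	u32 reta = 0;
	u32 i;

	for (i = 0; i < 128; i++) {
		/* Accumulate one byte per entry, lowest-numbered entry in the low byte */
		reta = (reta >> 8) | ((u32)queue_map[i] << 24);

		/* Every fourth entry completes one 32-bit RETA register */
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
	}
}

The same pattern (compute the register index from the entry index, then pack sub-fields into the 32-bit value) applies to the other indexed macros in this group, such as IXGBE_RAL/IXGBE_RAH or the flow-director programming registers.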
+ +/* Transmit DMA registers */ +#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ +#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +#define IXGBE_DTXCTL 0x07E00 + +#define IXGBE_DMATXCTL 0x04A80 +#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +#define IXGBE_PFDTXGSWC 0x08220 +#define IXGBE_DTXMXSZRQ 0x08100 +#define IXGBE_DTXTCPFLGL 0x04A88 +#define IXGBE_DTXTCPFLGH 0x04A8C +#define IXGBE_LBDRPEN 0x0CA00 +#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ + +#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ +#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ +#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ + +#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + +/* Anti-spoofing defines */ +#define IXGBE_SPOOF_MACAS_MASK 0xFF +#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +#define IXGBE_SPOOF_VLANAS_SHIFT 8 +#define IXGBE_PFVFSPOOF_REG_COUNT 8 +#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ +/* Tx DCA Control register : 128 of these (0-127) */ +#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) +#define IXGBE_TIPG 0x0CB00 +#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_MNGTXMAP 0x0CD10 +#define IXGBE_TIPG_FIBER_DEFAULT 3 +#define IXGBE_TXPBSIZE_SHIFT 10 + +/* Wake up registers */ +#define IXGBE_WUC 0x05800 +#define IXGBE_WUFC 0x05808 +#define IXGBE_WUS 0x05810 +#define IXGBE_IPAV 0x05838 +#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ + +#define IXGBE_WUPL 0x05900 +#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ +#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */ +#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host + * Filter Table */ + +#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 +#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 +#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ +#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ + +/* Wake Up Filter Control */ +#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ + +#define 
IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ +#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */ +#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ + +/* Wake Up Status */ +#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC +#define IXGBE_WUS_MAG IXGBE_WUFC_MAG +#define IXGBE_WUS_EX IXGBE_WUFC_EX +#define IXGBE_WUS_MC IXGBE_WUFC_MC +#define IXGBE_WUS_BC IXGBE_WUFC_BC +#define IXGBE_WUS_ARP IXGBE_WUFC_ARP +#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 +#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 +#define IXGBE_WUS_MNG IXGBE_WUFC_MNG +#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 +#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 +#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 +#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 +#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 +#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 +#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS + +/* Wake Up Packet Length */ +#define IXGBE_WUPL_LENGTH_MASK 0xFFFF + +/* DCB registers */ +#define MAX_TRAFFIC_CLASS 8 +#define IXGBE_RMCS 0x03D00 +#define IXGBE_DPMCS 0x07F40 +#define IXGBE_PDPMCS 0x0CD00 +#define IXGBE_RUPPBMR 0x050A0 +#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ + + +/* Security Control Registers */ +#define IXGBE_SECTXCTRL 0x08800 +#define IXGBE_SECTXSTAT 0x08804 +#define IXGBE_SECTXBUFFAF 0x08808 +#define IXGBE_SECTXMINIFG 0x08810 +#define IXGBE_SECRXCTRL 0x08D00 +#define IXGBE_SECRXSTAT 0x08D04 + +/* Security Bit Fields and Masks */ +#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 +#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 +#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 + +#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 +#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 + +#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 +#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 + +#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 +#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 + +/* LinkSec (MacSec) Registers */ +#define IXGBE_LSECTXCAP 0x08A00 +#define IXGBE_LSECRXCAP 0x08F00 +#define IXGBE_LSECTXCTRL 0x08A04 +#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +#define IXGBE_LSECTXSA 0x08A10 +#define IXGBE_LSECTXPN0 0x08A14 +#define IXGBE_LSECTXPN1 0x08A18 +#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +#define IXGBE_LSECRXCTRL 0x08F04 +#define IXGBE_LSECRXSCL 0x08F08 +#define IXGBE_LSECRXSCH 0x08F0C +#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ 
+#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) +#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ +#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ +#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ +#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ +#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ +#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ +#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ +#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ +#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ +#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ +#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ +#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ +#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ +#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ +#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ +#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ + +/* LinkSec (MacSec) Bit Fields and Masks */ +#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECTXCAP_SUM_SHIFT 16 +#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 +#define IXGBE_LSECRXCAP_SUM_SHIFT 16 + +#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 +#define IXGBE_LSECTXCTRL_DISABLE 0x0 +#define IXGBE_LSECTXCTRL_AUTH 0x1 +#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define IXGBE_LSECTXCTRL_AISCI 0x00000020 +#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C +#define IXGBE_LSECRXCTRL_EN_SHIFT 2 +#define IXGBE_LSECRXCTRL_DISABLE 0x0 +#define IXGBE_LSECRXCTRL_CHECK 0x1 +#define IXGBE_LSECRXCTRL_STRICT 0x2 +#define IXGBE_LSECRXCTRL_DROP 0x3 +#define IXGBE_LSECRXCTRL_PLSH 0x00000040 +#define IXGBE_LSECRXCTRL_RP 0x00000080 +#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* IpSec Registers */ +#define IXGBE_IPSTXIDX 0x08900 +#define IXGBE_IPSTXSALT 0x08904 +#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXIDX 0x08E00 +#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSPI 0x08E14 +#define IXGBE_IPSRXIPIDX 0x08E18 +#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +#define IXGBE_IPSRXSALT 0x08E2C +#define IXGBE_IPSRXMOD 0x08E30 + +#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 + +/* DCB registers */ +#define IXGBE_RTRPCS 0x02430 +#define IXGBE_RTTDCS 0x04900 +#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +#define IXGBE_RTTPCS 0x0CD00 +#define IXGBE_RTRUP2TC 0x03020 +#define IXGBE_RTTUP2TC 0x0C800 +#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_RTTDQSEL 0x04904 +#define IXGBE_RTTDT1C 0x04908 +#define IXGBE_RTTDT1S 0x0490C +#define IXGBE_RTTDTECC 0x04990 +#define IXGBE_RTTDTECC_NO_BCN 0x00000100 + +#define IXGBE_RTTBCNRC 0x04984 
+#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 +#define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) +#define IXGBE_RTTBCNRM 0x04980 + +/* BCN (for DCB) Registers */ +#define IXGBE_RTTBCNRS 0x04988 +#define IXGBE_RTTBCNCR 0x08B00 +#define IXGBE_RTTBCNACH 0x08B04 +#define IXGBE_RTTBCNACL 0x08B08 +#define IXGBE_RTTBCNTG 0x04A90 +#define IXGBE_RTTBCNIDX 0x08B0C +#define IXGBE_RTTBCNCP 0x08B10 +#define IXGBE_RTFRTIMER 0x08B14 +#define IXGBE_RTTBCNRTT 0x05150 +#define IXGBE_RTTBCNRD 0x0498C + +/* FCoE DMA Context Registers */ +#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ +#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */ +#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ +#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ +#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ +#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) +#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ +#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ +#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ +#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ +#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 +#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 +#define IXGBE_FCBUFF_OFFSET_SHIFT 16 +#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ +#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ +#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ +#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 +/* FCoE SOF/EOF */ +#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ +#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ +#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ +#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Filter Context Registers */ +#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ +#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ +#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ +#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ +#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ +#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ +#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ +#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ +#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ +/* FCoE Receive Control */ +#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ +#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ +#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ +#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ +#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ +#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ +#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ +#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ +#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 +/* FCoE Redirection */ +#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */ +#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ + +/* Stats registers */ +#define IXGBE_CRCERRS 0x04000 +#define IXGBE_ILLERRC 0x04004 +#define IXGBE_ERRBC 0x04008 +#define IXGBE_MSPDC 0x04010 +#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +#define IXGBE_MLFC 0x04034 +#define IXGBE_MRFC 0x04038 +#define IXGBE_RLEC 0x04040 +#define IXGBE_LXONTXC 0x03F60 +#define IXGBE_LXONRXC 0x0CF60 +#define IXGBE_LXOFFTXC 0x03F68 +#define IXGBE_LXOFFRXC 0x0CF68 +#define IXGBE_LXONRXCNT 0x041A4 +#define IXGBE_LXOFFRXCNT 0x041A8 +#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +#define IXGBE_PRC64 0x0405C +#define IXGBE_PRC127 0x04060 +#define IXGBE_PRC255 0x04064 +#define IXGBE_PRC511 0x04068 +#define IXGBE_PRC1023 0x0406C +#define IXGBE_PRC1522 0x04070 +#define IXGBE_GPRC 0x04074 +#define IXGBE_BPRC 0x04078 +#define IXGBE_MPRC 0x0407C +#define IXGBE_GPTC 0x04080 +#define IXGBE_GORCL 0x04088 +#define IXGBE_GORCH 0x0408C +#define IXGBE_GOTCL 0x04090 +#define IXGBE_GOTCH 0x04094 +#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +#define IXGBE_RUC 0x040A4 +#define IXGBE_RFC 0x040A8 +#define IXGBE_ROC 0x040AC +#define IXGBE_RJC 0x040B0 +#define IXGBE_MNGPRC 0x040B4 +#define IXGBE_MNGPDC 0x040B8 +#define IXGBE_MNGPTC 0x0CF90 +#define IXGBE_TORL 0x040C0 +#define IXGBE_TORH 0x040C4 +#define IXGBE_TPR 0x040D0 +#define IXGBE_TPT 0x040D4 +#define IXGBE_PTC64 0x040D8 +#define IXGBE_PTC127 0x040DC +#define IXGBE_PTC255 0x040E0 +#define IXGBE_PTC511 0x040E4 +#define IXGBE_PTC1023 0x040E8 +#define IXGBE_PTC1522 0x040EC +#define IXGBE_MPTC 0x040F0 +#define IXGBE_BPTC 0x040F4 +#define IXGBE_XEC 0x04120 +#define IXGBE_SSVPC 0x08780 + +#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ + (0x08600 + ((_i) * 4))) +#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) + +#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ +#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */ +#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */ +#define IXGBE_O2BGPTC 0x041C4 +#define IXGBE_O2BSPC 0x087B0 +#define IXGBE_B2OSPC 0x041C0 +#define IXGBE_B2OGPRC 0x02F90 +#define IXGBE_BUPRC 0x04180 +#define IXGBE_BMPRC 0x04184 +#define IXGBE_BBPRC 0x04188 +#define IXGBE_BUPTC 0x0418C +#define IXGBE_BMPTC 0x04190 +#define IXGBE_BBPTC 0x04194 +#define IXGBE_BCRCERRS 0x04198 +#define IXGBE_BXONRXC 0x0419C +#define IXGBE_BXOFFRXC 0x041E0 +#define IXGBE_BXONTXC 0x041E4 +#define IXGBE_BXOFFTXC 0x041E8 +#define IXGBE_PCRC8ECL 0x0E810 +#define IXGBE_PCRC8ECH 0x0E811 +#define IXGBE_PCRC8ECH_MASK 0x1F +#define IXGBE_LDPCECL 0x0E820 +#define IXGBE_LDPCECH 0x0E821 + +/* Management */ +#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MANC 0x05820 +#define IXGBE_MFVAL 0x05824 +#define IXGBE_MANC2H 0x05860 +#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_MIPAF 0x058B0 +#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ +#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +#define IXGBE_LSWFW 0x15014 +#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */ +#define IXGBE_BMCIPVAL 0x05060 +#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001 +#define IXGBE_BMCIP_IPADDR_VALID 0x00000002 + +/* Management Bit Fields and Masks */ +#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Enable BMC2OS and OS2BMC + * traffic */ +#define IXGBE_MANC_EN_BMC2OS_SHIFT 28 + +/* Firmware Semaphore Register */ +#define IXGBE_FWSM_MODE_MASK 0xE + +/* ARC Subsystem registers */ +#define IXGBE_HICR 0x15F00 +#define IXGBE_FWSTS 0x15F0C +#define IXGBE_HSMC0R 0x15F04 +#define IXGBE_HSMC1R 0x15F08 +#define IXGBE_SWSR 0x15F10 +#define IXGBE_HFDR 0x15FE8 +#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ + +#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define IXGBE_HICR_C 0x02 +#define IXGBE_HICR_SV 0x04 /* Status Validity */ +#define 
IXGBE_HICR_FW_RESET_ENABLE 0x40 +#define IXGBE_HICR_FW_RESET 0x80 + +/* PCI-E registers */ +#define IXGBE_GCR 0x11000 +#define IXGBE_GTV 0x11004 +#define IXGBE_FUNCTAG 0x11008 +#define IXGBE_GLT 0x1100C +#define IXGBE_PCIEPIPEADR 0x11004 +#define IXGBE_PCIEPIPEDAT 0x11008 +#define IXGBE_GSCL_1 0x11010 +#define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_3 0x11018 +#define IXGBE_GSCL_4 0x1101C +#define IXGBE_GSCN_0 0x11020 +#define IXGBE_GSCN_1 0x11024 +#define IXGBE_GSCN_2 0x11028 +#define IXGBE_GSCN_3 0x1102C +#define IXGBE_FACTPS 0x10150 +#define IXGBE_PCIEANACTL 0x11040 +#define IXGBE_SWSM 0x10140 +#define IXGBE_FWSM 0x10148 +#define IXGBE_GSSR 0x10160 +#define IXGBE_MREVID 0x11064 +#define IXGBE_DCA_ID 0x11070 +#define IXGBE_DCA_CTRL 0x11074 +#define IXGBE_SWFW_SYNC IXGBE_GSSR + +/* PCI-E registers 82599-Specific */ +#define IXGBE_GCR_EXT 0x11050 +#define IXGBE_GSCL_5_82599 0x11030 +#define IXGBE_GSCL_6_82599 0x11034 +#define IXGBE_GSCL_7_82599 0x11038 +#define IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_PHYADR_82599 0x11040 +#define IXGBE_PHYDAT_82599 0x11044 +#define IXGBE_PHYCTL_82599 0x11048 +#define IXGBE_PBACLR_82599 0x11068 +#define IXGBE_CIAA_82599 0x11088 +#define IXGBE_CIAD_82599 0x1108C +#define IXGBE_PICAUSE 0x110B0 +#define IXGBE_PIENA 0x110B8 +#define IXGBE_CDQ_MBR_82599 0x110B4 +#define IXGBE_PCIESPARE 0x110BC +#define IXGBE_MISC_REG_82599 0x110F0 +#define IXGBE_ECC_CTRL_0_82599 0x11100 +#define IXGBE_ECC_CTRL_1_82599 0x11104 +#define IXGBE_ECC_STATUS_82599 0x110E0 +#define IXGBE_BAR_CTRL_82599 0x110F4 + +/* PCI Express Control */ +#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define IXGBE_GCR_CAP_VER2 0x00040000 + +#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 +#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 +#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 +#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 +#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ + IXGBE_GCR_EXT_VT_MODE_64) +/* Time Sync Registers */ +#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ +#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ +#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ +#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ +#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ +#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ +#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ +#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ +#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ +#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ +#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ +#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ +#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ +#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ +#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ +#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ +#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ +#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 
Control register - RW */ +#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ + +/* Diagnostic Registers */ +#define IXGBE_RDSTATCTL 0x02C20 +#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +#define IXGBE_RDHMPN 0x02F08 +#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) +#define IXGBE_RDPROBE 0x02F20 +#define IXGBE_RDMAM 0x02F30 +#define IXGBE_RDMAD 0x02F34 +#define IXGBE_TDSTATCTL 0x07C20 +#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ +#define IXGBE_TDHMPN 0x07F08 +#define IXGBE_TDHMPN2 0x082FC +#define IXGBE_TXDESCIC 0x082CC +#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) +#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +#define IXGBE_TDPROBE 0x07F20 +#define IXGBE_TXBUFCTRL 0x0C600 +#define IXGBE_TXBUFDATA0 0x0C610 +#define IXGBE_TXBUFDATA1 0x0C614 +#define IXGBE_TXBUFDATA2 0x0C618 +#define IXGBE_TXBUFDATA3 0x0C61C +#define IXGBE_RXBUFCTRL 0x03600 +#define IXGBE_RXBUFDATA0 0x03610 +#define IXGBE_RXBUFDATA1 0x03614 +#define IXGBE_RXBUFDATA2 0x03618 +#define IXGBE_RXBUFDATA3 0x0361C +#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +#define IXGBE_RFVAL 0x050A4 +#define IXGBE_MDFTC1 0x042B8 +#define IXGBE_MDFTC2 0x042C0 +#define IXGBE_MDFTFIFO1 0x042C4 +#define IXGBE_MDFTFIFO2 0x042C8 +#define IXGBE_MDFTS 0x042CC +#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ +#define IXGBE_PCIEECCCTL 0x1106C +#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ +#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ +#define IXGBE_PCIEECCCTL0 0x11100 +#define IXGBE_PCIEECCCTL1 0x11104 +#define IXGBE_RXDBUECC 0x03F70 +#define IXGBE_TXDBUECC 0x0CF70 +#define IXGBE_RXDBUEST 0x03F74 +#define IXGBE_TXDBUEST 0x0CF74 +#define IXGBE_PBTXECC 0x0C300 +#define IXGBE_PBRXECC 0x03300 +#define IXGBE_GHECCR 0x110B0 + +/* MAC Registers */ +#define IXGBE_PCS1GCFIG 0x04200 +#define IXGBE_PCS1GLCTL 0x04208 +#define IXGBE_PCS1GLSTA 0x0420C +#define IXGBE_PCS1GDBG0 0x04210 +#define IXGBE_PCS1GDBG1 0x04214 +#define IXGBE_PCS1GANA 0x04218 +#define IXGBE_PCS1GANLP 0x0421C +#define IXGBE_PCS1GANNP 0x04220 +#define IXGBE_PCS1GANLPNP 0x04224 +#define IXGBE_HLREG0 0x04240 +#define IXGBE_HLREG1 0x04244 +#define IXGBE_PAP 0x04248 
+#define IXGBE_MACA 0x0424C +#define IXGBE_APAE 0x04250 +#define IXGBE_ARD 0x04254 +#define IXGBE_AIS 0x04258 +#define IXGBE_MSCA 0x0425C +#define IXGBE_MSRWD 0x04260 +#define IXGBE_MLADD 0x04264 +#define IXGBE_MHADD 0x04268 +#define IXGBE_MAXFRS 0x04268 +#define IXGBE_TREG 0x0426C +#define IXGBE_PCSS1 0x04288 +#define IXGBE_PCSS2 0x0428C +#define IXGBE_XPCSS 0x04290 +#define IXGBE_MFLCN 0x04294 +#define IXGBE_SERDESC 0x04298 +#define IXGBE_MACS 0x0429C +#define IXGBE_AUTOC 0x042A0 +#define IXGBE_LINKS 0x042A4 +#define IXGBE_LINKS2 0x04324 +#define IXGBE_AUTOC2 0x042A8 +#define IXGBE_AUTOC3 0x042AC +#define IXGBE_ANLP1 0x042B0 +#define IXGBE_ANLP2 0x042B4 +#define IXGBE_MACC 0x04330 +#define IXGBE_ATLASCTL 0x04800 +#define IXGBE_MMNGC 0x042D0 +#define IXGBE_ANLPNP1 0x042D4 +#define IXGBE_ANLPNP2 0x042D8 +#define IXGBE_KRPCSFC 0x042E0 +#define IXGBE_KRPCSS 0x042E4 +#define IXGBE_FECS1 0x042E8 +#define IXGBE_FECS2 0x042EC +#define IXGBE_SMADARCTL 0x14F10 +#define IXGBE_MPVC 0x04318 +#define IXGBE_SGMIIC 0x04314 + +/* Statistics Registers */ +#define IXGBE_RXNFGPC 0x041B0 +#define IXGBE_RXNFGBCL 0x041B4 +#define IXGBE_RXNFGBCH 0x041B8 +#define IXGBE_RXDGPC 0x02F50 +#define IXGBE_RXDGBCL 0x02F54 +#define IXGBE_RXDGBCH 0x02F58 +#define IXGBE_RXDDGPC 0x02F5C +#define IXGBE_RXDDGBCL 0x02F60 +#define IXGBE_RXDDGBCH 0x02F64 +#define IXGBE_RXLPBKGPC 0x02F68 +#define IXGBE_RXLPBKGBCL 0x02F6C +#define IXGBE_RXLPBKGBCH 0x02F70 +#define IXGBE_RXDLPBKGPC 0x02F74 +#define IXGBE_RXDLPBKGBCL 0x02F78 +#define IXGBE_RXDLPBKGBCH 0x02F7C +#define IXGBE_TXDGPC 0x087A0 +#define IXGBE_TXDGBCL 0x087A4 +#define IXGBE_TXDGBCH 0x087A8 + +#define IXGBE_RXDSTATCTRL 0x02F40 + +/* Copper Pond 2 link timeout */ +#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 + +/* Omer CORECTL */ +#define IXGBE_CORECTL 0x014F00 +/* BARCTRL */ +#define IXGBE_BARCTRL 0x110F4 +#define IXGBE_BARCTRL_FLSIZE 0x0700 +#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 +#define IXGBE_BARCTRL_CSRSIZE 0x2000 + +/* RSCCTL Bit Masks */ +#define IXGBE_RSCCTL_RSCEN 0x01 +#define IXGBE_RSCCTL_MAXDESC_1 0x00 +#define IXGBE_RSCCTL_MAXDESC_4 0x04 +#define IXGBE_RSCCTL_MAXDESC_8 0x08 +#define IXGBE_RSCCTL_MAXDESC_16 0x0C + +/* RSCDBU Bit Masks */ +#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F +#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 + +/* RDRXCTL Bit Masks */ +#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ +#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +#define IXGBE_RDRXCTL_MVMEN 0x00000020 +#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ +#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ +#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ +#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ +#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ +#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ + +/* RQTC Bit Masks and Shifts */ +#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) +#define IXGBE_RQTC_TC0_MASK (0x7 << 0) +#define IXGBE_RQTC_TC1_MASK (0x7 << 4) +#define IXGBE_RQTC_TC2_MASK (0x7 << 8) +#define IXGBE_RQTC_TC3_MASK (0x7 << 12) +#define IXGBE_RQTC_TC4_MASK (0x7 << 16) +#define IXGBE_RQTC_TC5_MASK (0x7 << 20) +#define IXGBE_RQTC_TC6_MASK (0x7 << 24) +#define IXGBE_RQTC_TC7_MASK (0x7 << 28) + +/* PSRTYPE.RQPL Bit masks and shift */ +#define IXGBE_PSRTYPE_RQPL_MASK 0x7 +#define IXGBE_PSRTYPE_RQPL_SHIFT 29 + +/* CTRL Bit Masks */ +#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ 
+#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ +#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) + +/* FACTPS */ +#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ + +/* MHADD Bit Masks */ +#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +#define IXGBE_MHADD_MFS_SHIFT 16 + +/* Extended Device Control */ +#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ +#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + +/* Direct Cache Access (DCA) definitions */ +#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ +#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ +#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */ +#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */ + +#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ +#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ +#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ + +/* MSCA Bit Masks */ +#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */ +#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */ +#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */ +#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ +#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ +#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */ +#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/ +#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ +#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */ +#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ + +/* MSRWD bit masks */ +#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +#define IXGBE_MSRWD_READ_DATA_SHIFT 16 + +/* Atlas 
registers */ +#define IXGBE_ATLAS_PDN_LPBK 0x24 +#define IXGBE_ATLAS_PDN_10G 0xB +#define IXGBE_ATLAS_PDN_1G 0xC +#define IXGBE_ATLAS_PDN_AN 0xD + +/* Atlas bit masks */ +#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + +/* Omer bit masks */ +#define IXGBE_CORECTL_WRITE_CMD 0x00010000 + +/* Device Type definitions for new protocol MDIO commands */ +#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 +#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 +#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 +#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ +#define IXGBE_TWINAX_DEV 1 + +#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ + +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 + +#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ +#define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ +#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ +#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ +#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ +#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ +#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ +#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ +#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ +#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ +#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ +#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ +#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ +#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ +#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ +#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ + +#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ + +/* MII clause 22/28 definitions */ +#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 + +#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ +#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ +#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ +#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ +#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ +#define IXGBE_MII_RESTART 0x200 +#define IXGBE_MII_AUTONEG_COMPLETE 0x20 +#define IXGBE_MII_AUTONEG_LINK_UP 0x04 +#define IXGBE_MII_AUTONEG_REG 0x0 
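Note (illustrative only, not part of the patch): the MSCA field masks and shifts above, together with the MDIO device-type and register constants, describe how a clause-45 MDIO command word is assembled. A small standalone sketch composing such a word for a read of the AUTO_NEG status register; the preceding address cycle and the MDI-in-progress polling that a real access requires are omitted:

#include <stdio.h>
#include <stdint.h>

/* Constants copied from the MSCA/MDIO definitions above */
#define IXGBE_MSCA_NP_ADDR_MASK      0x0000FFFF
#define IXGBE_MSCA_DEV_TYPE_SHIFT    16
#define IXGBE_MSCA_PHY_ADDR_SHIFT    21
#define IXGBE_MSCA_READ              0x0C000000
#define IXGBE_MSCA_NEW_PROTOCOL      0x00000000
#define IXGBE_MSCA_MDI_COMMAND       0x40000000
#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7
#define IXGBE_MDIO_AUTO_NEG_STATUS   0x1

/* Compose one MSCA command word: register address in the low 16 bits,
 * then device type, PHY address, opcode and ST code, plus the
 * MDI_COMMAND bit that starts the transaction. */
static uint32_t msca_word(uint32_t reg, uint32_t dev, uint32_t phy, uint32_t op)
{
	return (reg & IXGBE_MSCA_NP_ADDR_MASK) |
	       (dev << IXGBE_MSCA_DEV_TYPE_SHIFT) |
	       (phy << IXGBE_MSCA_PHY_ADDR_SHIFT) |
	       op | IXGBE_MSCA_NEW_PROTOCOL | IXGBE_MSCA_MDI_COMMAND;
}

int main(void)
{
	/* Read of the AUTO_NEG status register on PHY address 0:
	 * expected word 0x4C070001 (READ | dev 7 | reg 1 | MDI_COMMAND). */
	printf("MSCA = 0x%08X\n",
	       (unsigned)msca_word(IXGBE_MDIO_AUTO_NEG_STATUS,
				   IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 0,
				   IXGBE_MSCA_READ));
	return 0;
}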
+ +#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +#define IXGBE_MAX_PHY_ADDR 32 + +/* PHY IDs*/ +#define TN1010_PHY_ID 0x00A19410 +#define TNX_FW_REV 0xB +#define X540_PHY_ID 0x01540200 +#define AQ_FW_REV 0x20 +#define QT2022_PHY_ID 0x0043A400 +#define ATH_PHY_ID 0x03429050 + +/* PHY Types */ +#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 + +/* Special PHY Init Routine */ +#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +#define IXGBE_PHY_INIT_END_NL 0xFFFF +#define IXGBE_CONTROL_MASK_NL 0xF000 +#define IXGBE_DATA_MASK_NL 0x0FFF +#define IXGBE_CONTROL_SHIFT_NL 12 +#define IXGBE_DELAY_NL 0 +#define IXGBE_DATA_NL 1 +#define IXGBE_CONTROL_NL 0x000F +#define IXGBE_CONTROL_EOL_NL 0x0FFF +#define IXGBE_CONTROL_SOL_NL 0x0000 + +/* General purpose Interrupt Enable */ +#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +#define IXGBE_GPIE_EIAME 0x40000000 +#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ +#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ +#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ +#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ + +/* Packet Buffer Initialization */ +#define IXGBE_MAX_PACKET_BUFFERS 8 + +#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ +#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ +#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */ +#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */ + +#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ +#define IXGBE_MAX_PB 8 + +/* Packet buffer allocation strategies */ +enum { + PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ +#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL + PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ +#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED +}; + +/* Transmit Flow Control status */ +#define IXGBE_TFCS_TXOFF 0x00000001 +#define IXGBE_TFCS_TXOFF0 0x00000100 +#define IXGBE_TFCS_TXOFF1 0x00000200 +#define IXGBE_TFCS_TXOFF2 0x00000400 +#define IXGBE_TFCS_TXOFF3 0x00000800 +#define IXGBE_TFCS_TXOFF4 0x00001000 +#define IXGBE_TFCS_TXOFF5 0x00002000 +#define IXGBE_TFCS_TXOFF6 0x00004000 +#define IXGBE_TFCS_TXOFF7 0x00008000 + +/* TCP Timer */ +#define IXGBE_TCPTIMER_KS 0x00000100 +#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +#define IXGBE_TCPTIMER_LOOP 0x00000800 +#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + +/* HLREG0 Bit Masks */ +#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ +#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +#define IXGBE_HLREG0_CONTMDC 
0x00020000 /* bit 17 */ +#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + +/* VMD_CTL bitmasks */ +#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + +/* VT_CTL bitmasks */ +#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ +#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ +#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ +#define IXGBE_VT_CTL_POOL_SHIFT 7 +#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + +/* VMOLR bitmasks */ +#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ +#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ +#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ +#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ + +/* VFRE bitmask */ +#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +/* RDHMPN and TDHMPN bitmasks */ +#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 +#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 + +#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 +#define IXGBE_RDMAM_DWORD_SHIFT 9 +#define IXGBE_RDMAM_DESC_COMP_FIFO 1 +#define IXGBE_RDMAM_DFC_CMD_FIFO 2 +#define IXGBE_RDMAM_RSC_HEADER_ADDR 3 +#define IXGBE_RDMAM_TCN_STATUS_RAM 4 +#define IXGBE_RDMAM_WB_COLL_FIFO 5 +#define IXGBE_RDMAM_QSC_CNT_RAM 6 +#define IXGBE_RDMAM_QSC_FCOE_RAM 7 +#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 +#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA +#define IXGBE_RDMAM_QSC_RSC_RAM 0xB +#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 +#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 +#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 +#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32 +#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4 +#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 +#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 +#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 +#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 +#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 +#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 +#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512 +#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 +#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 +#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 +#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32 +#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8 + +#define IXGBE_TXDESCIC_READY 0x80000000 + +/* Receive Checksum Control */ +#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* FCRTL Bit Masks */ +#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ +#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ + +/* PAP bit masks*/ +#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + +/* RMCS Bit Masks */ +#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ +/* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +#define IXGBE_RMCS_RAC 0x00000004 
+#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ +#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ +#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ +#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + +/* FCCFG Bit Masks */ +#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ +#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ + +/* Interrupt register bitmasks */ + +/* Extended Interrupt Cause Read */ +#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ +#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ +#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ +#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ +#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + +/* Extended Interrupt Cause Set */ +#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Set */ +#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ +#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* 
ECC Error */ +#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +/* Extended Interrupt Mask Clear */ +#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ +#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + +#define IXGBE_EIMS_ENABLE_MASK ( \ + IXGBE_EIMS_RTX_QUEUE | \ + IXGBE_EIMS_LSC | \ + IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */ +#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ + +#define IXGBE_MAX_FTQF_FILTERS 128 +#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 +#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +#define IXGBE_FTQF_PROTOCOL_SCTP 2 +#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +#define IXGBE_FTQF_PRIORITY_SHIFT 2 +#define IXGBE_FTQF_POOL_MASK 0x0000003F 
+#define IXGBE_FTQF_POOL_SHIFT 8 +#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 + +/* Interrupt clear mask */ +#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + +/* Interrupt Vector Allocation Registers */ +#define IXGBE_IVAR_REG_NUM 25 +#define IXGBE_IVAR_REG_NUM_82599 64 +#define IXGBE_IVAR_TXRX_ENTRY 96 +#define IXGBE_IVAR_RX_ENTRY 64 +#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +#define IXGBE_IVAR_TX_ENTRY 32 + +#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + +#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + +/* ETYPE Queue Filter/Select Bit Masks */ +#define IXGBE_MAX_ETQF_FILTERS 8 +#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ + +#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 +#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ +#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ + +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! + * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + * FCoE (0x8906): Filter 2 + * 1588 (0x88f7): Filter 3 + * FIP (0x8914): Filter 4 + */ +#define IXGBE_ETQF_FILTER_EAPOL 0 +#define IXGBE_ETQF_FILTER_FCOE 2 +#define IXGBE_ETQF_FILTER_1588 3 +#define IXGBE_ETQF_FILTER_FIP 4 +/* VLAN Control Bit Masks */ +#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + +/* VLAN pool filtering masks */ +#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ +#define IXGBE_VLVF_ENTRIES 64 +#define IXGBE_VLVF_VLANID_MASK 0x00000FFF +/* Per VF Port VLAN insertion rules */ +#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + +/* STATUS Bit Masks */ +#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ +#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ + +#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ + +/* ESDP Bit Masks */ +#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ +#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ +#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ +#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* 
SDP5 IO direction */ + +/* LEDCTL Bit Masks */ +#define IXGBE_LED_IVRT_BASE 0x00000040 +#define IXGBE_LED_BLINK_BASE 0x00000080 +#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) +#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) + +/* LED modes */ +#define IXGBE_LED_LINK_UP 0x0 +#define IXGBE_LED_LINK_10G 0x1 +#define IXGBE_LED_MAC 0x2 +#define IXGBE_LED_FILTER 0x3 +#define IXGBE_LED_LINK_ACTIVE 0x4 +#define IXGBE_LED_LINK_1G 0x5 +#define IXGBE_LED_ON 0xE +#define IXGBE_LED_OFF 0xF + +/* AUTOC Bit Masks */ +#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 +#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +#define IXGBE_AUTOC_KX_SUPP 0x40000000 +#define IXGBE_AUTOC_PAUSE 0x30000000 +#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 +#define IXGBE_AUTOC_RF 0x08000000 +#define IXGBE_AUTOC_PD_TMR 0x06000000 +#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +#define IXGBE_AUTOC_FECA 0x00040000 +#define IXGBE_AUTOC_FECR 0x00020000 +#define IXGBE_AUTOC_KR_SUPP 0x00010000 +#define IXGBE_AUTOC_AN_RESTART 0x00001000 +#define IXGBE_AUTOC_FLU 0x00000001 +#define IXGBE_AUTOC_LMS_SHIFT 13 +#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 +#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 +#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) + +#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 +#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) + +#define IXGBE_MACC_FLU 0x00000001 +#define IXGBE_MACC_FSV_10G 0x00030000 +#define IXGBE_MACC_FS 0x00040000 +#define IXGBE_MAC_RX2TX_LPBK 0x00000002 + +/* LINKS Bit Masks */ +#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +#define 
IXGBE_LINKS_UP 0x40000000 +#define IXGBE_LINKS_SPEED 0x20000000 +#define IXGBE_LINKS_MODE 0x18000000 +#define IXGBE_LINKS_RX_MODE 0x06000000 +#define IXGBE_LINKS_TX_MODE 0x01800000 +#define IXGBE_LINKS_XGXS_EN 0x00400000 +#define IXGBE_LINKS_SGMII_EN 0x02000000 +#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +#define IXGBE_LINKS_1G_AN_EN 0x00100000 +#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 +#define IXGBE_LINKS_1G_SYNC 0x00040000 +#define IXGBE_LINKS_10G_ALIGN 0x00020000 +#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +#define IXGBE_LINKS_TL_FAULT 0x00001000 +#define IXGBE_LINKS_SIGNAL 0x00000F00 + +#define IXGBE_LINKS_SPEED_82599 0x30000000 +#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ + +#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 + +/* PCS1GLSTA Bit Masks */ +#define IXGBE_PCS1GLSTA_LINK_OK 1 +#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 + +/* PCS1GLCTL Bit Masks */ +#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 +#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 +#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 +#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 + +/* ANLP1 Bit Masks */ +#define IXGBE_ANLP1_PAUSE 0x0C00 +#define IXGBE_ANLP1_SYM_PAUSE 0x0400 +#define IXGBE_ANLP1_ASM_PAUSE 0x0800 +#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 + +/* SW Semaphore Register bitmasks */ +#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ + +/* SW_FW_SYNC/GSSR definitions */ +#define IXGBE_GSSR_EEP_SM 0x0001 +#define IXGBE_GSSR_PHY0_SM 0x0002 +#define IXGBE_GSSR_PHY1_SM 0x0004 +#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +#define IXGBE_GSSR_FLASH_SM 0x0010 +#define IXGBE_GSSR_SW_MNG_SM 0x0400 + +/* FW Status register bitmask */ +#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ + +/* EEC Register */ +#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define IXGBE_EEC_FWE_SHIFT 4 +#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ +#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ +/* EEPROM Addressing bits based on type (0-small, 1-large) */ +#define IXGBE_EEC_ADDR_SIZE 0x00000400 +#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ +#define 
IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */ + +#define IXGBE_EEC_SIZE_SHIFT 11 +#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +#define IXGBE_EEPROM_OPCODE_BITS 8 + +/* Part Number String Length */ +#define IXGBE_PBANUM_LENGTH 11 + +/* Checksum and EEPROM pointers */ +#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +#define IXGBE_EEPROM_CHECKSUM 0x3F +#define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_PCIE_ANALOG_PTR 0x03 +#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +#define IXGBE_PHY_PTR 0x04 +#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +#define IXGBE_OPTION_ROM_PTR 0x05 +#define IXGBE_PCIE_GENERAL_PTR 0x06 +#define IXGBE_PCIE_CONFIG0_PTR 0x07 +#define IXGBE_PCIE_CONFIG1_PTR 0x08 +#define IXGBE_CORE0_PTR 0x09 +#define IXGBE_CORE1_PTR 0x0A +#define IXGBE_MAC0_PTR 0x0B +#define IXGBE_MAC1_PTR 0x0C +#define IXGBE_CSR0_CONFIG_PTR 0x0D +#define IXGBE_CSR1_CONFIG_PTR 0x0E +#define IXGBE_FW_PTR 0x0F +#define IXGBE_PBANUM0_PTR 0x15 +#define IXGBE_PBANUM1_PTR 0x16 +#define IXGBE_FREE_SPACE_PTR 0X3E +#define IXGBE_SAN_MAC_ADDR_PTR 0x28 +#define IXGBE_DEVICE_CAPS 0x2C +#define IXGBE_DEVICE_CAPS_EXT_THERMAL_SENSOR 0x10 +#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 +#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 + +/* MSI-X capability fields masks */ +#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + +/* Legacy EEPROM word offsets */ +#define IXGBE_ISCSI_BOOT_CAPS 0x0033 +#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 +#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 + +/* EEPROM Commands - SPI */ +#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ +/* EEPROM reset Write Enable latch */ +#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Read Register */ +#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ + +#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 + +#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 +#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ +#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ + +#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS +#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif + +/* Number of 5 microseconds we wait for EERD read and + * EERW write to complete */ +#define IXGBE_EERD_EEWR_ATTEMPTS 100000 + +/* # attempts we wait for flush update to complete */ +#define IXGBE_FLUDONE_ATTEMPTS 20000 + +#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +#define 
IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ + +#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 +#define IXGBE_FW_LESM_STATE_1 0x1 +#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +#define IXGBE_FW_PATCH_VERSION_4 0x7 +#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */ +#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */ + +#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + +/* PCI Bus Info */ +#define IXGBE_PCI_DEVICE_STATUS 0xAA +#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +#define IXGBE_PCI_LINK_STATUS 0xB2 +#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +#define IXGBE_PCI_LINK_WIDTH 0x3F0 +#define IXGBE_PCI_LINK_WIDTH_1 0x10 +#define IXGBE_PCI_LINK_WIDTH_2 0x20 +#define IXGBE_PCI_LINK_WIDTH_4 0x40 +#define IXGBE_PCI_LINK_WIDTH_8 0x80 +#define IXGBE_PCI_LINK_SPEED 0xF +#define IXGBE_PCI_LINK_SPEED_2500 0x1 +#define IXGBE_PCI_LINK_SPEED_5000 0x2 +#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + +/* Check whether address is multicast. This is little-endian specific check.*/ +#define IXGBE_IS_MULTICAST(Address) \ + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) + +/* Check whether an address is broadcast. 
*/ +#define IXGBE_IS_BROADCAST(Address) \ + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ + (((u8 *)(Address))[1] == ((u8)0xff))) + +/* RAH */ +#define IXGBE_RAH_VIND_MASK 0x003C0000 +#define IXGBE_RAH_VIND_SHIFT 18 +#define IXGBE_RAH_AV 0x80000000 +#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF + +/* Header split receive */ +#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 +#define IXGBE_RFCTL_RSC_DIS 0x00000010 +#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +#define IXGBE_RFCTL_NFS_VER_2 0 +#define IXGBE_RFCTL_NFS_VER_3 1 +#define IXGBE_RFCTL_NFS_VER_4 2 +#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Transmit Config masks */ +#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */ +#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ +/* Enable short packet padding to 64 bytes */ +#define IXGBE_TX_PAD_ENABLE 0x00000400 +#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ +/* This allows for 16K packets + 4k for vlan */ +#define IXGBE_MAX_FRAME_SZ 0x40040000 + +#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ + +/* Receive Config masks */ +#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
write-back flushing */ +#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ + +#define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ + +#define IXGBE_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ + +#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF +#define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 +#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG 0x01 +#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG 0x02 +#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 +#define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 + +#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00 +#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 + +#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ +/* Receive Priority Flow Control Enable */ +#define IXGBE_FCTRL_RPFCE 0x00004000 +#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ +#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */ + +/* Multiple Receive Queue Control */ +#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define 
IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 + +/* Queue Drop Enable */ +#define IXGBE_QDE_ENABLE 0x00000001 +#define IXGBE_QDE_IDX_MASK 0x00007F00 +#define IXGBE_QDE_IDX_SHIFT 8 + +#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 +#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +/* Multiple Transmit Queue Command Register */ +#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ +#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ +#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ +#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */ +#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ + +/* Receive Descriptor bit definitions */ +#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ +#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */ +#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ +#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ +#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ +#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ +#define 
IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ +#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define IXGBE_RXD_PRI_SHIFT 13 +#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define IXGBE_RXD_CFI_SHIFT 12 + +#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ +#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */ + +/* PSRTYPE bit definitions */ +#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +#define IXGBE_PSRTYPE_L2HDR 0x00001000 + +/* SRRCTL bit definitions */ +#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +#define IXGBE_SRRCTL_DROP_EN 0x10000000 +#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +#define IXGBE_RXDADV_SPH 0x8000 + +/* RSS Hash results */ +#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 +#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + 
+/* RSS Packet Types as indicated in the receive descriptor. */ +#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* Security Processing bit Indication */ +#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +/* Masks to determine if packets should be dropped due to frame errors */ +#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXD_ERR_CE | \ + IXGBE_RXD_ERR_LE | \ + IXGBE_RXD_ERR_PE | \ + IXGBE_RXD_ERR_OSE | \ + IXGBE_RXD_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ + IXGBE_RXDADV_ERR_CE | \ + IXGBE_RXDADV_ERR_LE | \ + IXGBE_RXDADV_ERR_PE | \ + IXGBE_RXDADV_ERR_OSE | \ + IXGBE_RXDADV_ERR_USE) + +#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE + +/* Multicast bit mask */ +#define IXGBE_MCSTCTRL_MFE 0x4 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + +/* Vlan-specific macros */ +#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + +/* SR-IOV specific macros */ +#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) +#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4)) +#define IXGBE_VFLRE(_i) (((_i & 1) ? 
0x001C0 : 0x00600)) +#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4)) + +/* Little Endian defines */ +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 + +#endif +#ifndef __be16 +/* Big Endian defines */ +#define __be16 u16 +#define __be32 u32 +#define __be64 u64 + +#endif +enum ixgbe_fdir_pballoc_type { + IXGBE_FDIR_PBALLOC_NONE = 0, + IXGBE_FDIR_PBALLOC_64K = 1, + IXGBE_FDIR_PBALLOC_128K = 2, + IXGBE_FDIR_PBALLOC_256K = 3, +}; + +/* Flow Director register values */ +#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 +#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 +#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 +#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 +#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 +#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 +#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 +#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 +#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 +#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 +#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 +#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 + +#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 +#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 +#define IXGBE_FDIRM_VLANID 0x00000001 +#define IXGBE_FDIRM_VLANP 0x00000002 +#define IXGBE_FDIRM_POOL 0x00000004 +#define IXGBE_FDIRM_L4P 0x00000008 +#define IXGBE_FDIRM_FLEX 0x00000010 +#define IXGBE_FDIRM_DIPv6 0x00000020 + +#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF +#define IXGBE_FDIRFREE_FREE_SHIFT 0 +#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 +#define IXGBE_FDIRFREE_COLL_SHIFT 16 +#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F +#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 +#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 +#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 +#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF +#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 +#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 +#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 +#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF +#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 +#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 +#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 +#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 +#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 +#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 +#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 + +#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 +#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 +#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 +#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 +#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 +#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 +#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 +#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 +#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 +#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 +#define IXGBE_FDIRCMD_IPV6 0x00000080 +#define IXGBE_FDIRCMD_CLEARHT 0x00000100 +#define IXGBE_FDIRCMD_DROP 0x00000200 +#define IXGBE_FDIRCMD_INT 0x00000400 +#define IXGBE_FDIRCMD_LAST 0x00000800 +#define IXGBE_FDIRCMD_COLLISION 0x00001000 +#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 +#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 +#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 +#define IXGBE_FDIR_INIT_DONE_POLL 10 +#define IXGBE_FDIRCMD_CMD_POLL 10 + +#define IXGBE_FDIR_DROP_QUEUE 127 + + +/* Manageablility Host Interface defines */ +#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define IXGBE_HI_COMMAND_TIMEOUT 500 /* 
Process HI command limit */ + +/* CEM Support */ +#define FW_CEM_HDR_LEN 0x4 +#define FW_CEM_CMD_DRIVER_INFO 0xDD +#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +#define FW_CEM_CMD_RESERVED 0X0 +#define FW_CEM_UNUSED_VER 0x0 +#define FW_CEM_MAX_RETRIES 3 +#define FW_CEM_RESP_STATUS_SUCCESS 0x1 + +/* Host Interface Command Structures */ + +struct ixgbe_hic_hdr { + u8 cmd; + u8 buf_len; + union { + u8 cmd_resv; + u8 ret_status; + } cmd_or_resp; + u8 checksum; +}; + +struct ixgbe_hic_drv_info { + struct ixgbe_hic_hdr hdr; + u8 port_num; + u8 ver_sub; + u8 ver_build; + u8 ver_min; + u8 ver_maj; + u8 pad; /* end spacing to ensure length is mult. of dword */ + u16 pad2; /* end spacing to ensure length is mult. of dword2 */ +}; + +/* Transmit Descriptor - Legacy */ +struct ixgbe_legacy_tx_desc { + u64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 vlan; + } fields; + } upper; +}; + +/* Transmit Descriptor - Advanced */ +union ixgbe_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Receive Descriptor - Legacy */ +struct ixgbe_legacy_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 vlan; +}; + +/* Receive Descriptor - Advanced */ +union ixgbe_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /* RSS, Pkt type */ + __le16 hdr_info; /* Splithdr, hdrlen */ + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +/* Context descriptors */ +struct ixgbe_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */ +#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ +#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define IXGBE_ADVTXD_DCMD_DEXT 
IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ +#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ + IXGBE_ADVTXD_POPTS_SHIFT) +#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ +#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ +#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ +#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ +#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ +#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +/* Autonegotiation advertised speeds */ +typedef u32 ixgbe_autoneg_advertised; +/* Link speed */ +typedef u32 ixgbe_link_speed; +#define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_100_FULL 0x0008 +#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) +#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ + IXGBE_LINK_SPEED_1GB_FULL | \ + IXGBE_LINK_SPEED_10GB_FULL) + + +/* Physical layer type */ +typedef u32 ixgbe_physical_layer; +#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 +#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +#define 
IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 +#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 +#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 +#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 +#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 +#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 +#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 + +/* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + +/* BitTimes (BT) conversion */ +#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024)) +#define IXGBE_B2BT(BT) (BT * 8) + +/* Calculate Delay to respond to PFC */ +#define IXGBE_PFC_D 672 + +/* Calculate Cable Delay */ +#define IXGBE_CABLE_DC 5556 /* Delay Copper */ +#define IXGBE_CABLE_DO 5000 /* Delay Optical */ + +/* Calculate Interface Delay X540 */ +#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ +#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ +#define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + +#define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) + +/* Calculate Interface Delay 82598, 82599 */ +#define IXGBE_PHY_D 12800 +#define IXGBE_MAC_D 4096 +#define IXGBE_XAUI_D (2 * 1024) + +#define IXGBE_ID (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D) + +/* Calculate Delay incurred from higher layer */ +#define IXGBE_HD 6144 + +/* Calculate PCI Bus delay for low thresholds */ +#define IXGBE_PCI_DELAY 10000 + +/* Calculate X540 delay value in bit times */ +#define IXGBE_FILL_RATE (36 / 25) + +#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \ + (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + \ + (2 * IXGBE_ID_X540) + \ + IXGBE_HD + IXGBE_B2BT(TC))) + +/* Calculate 82599, 82598 delay value in bit times */ +#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \ + (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \ + (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \ + IXGBE_HD + IXGBE_B2BT(TC))) + +/* Calculate low threshold delay values */ +#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \ + (IXGBE_FILL_RATE * IXGBE_PCI_DELAY)) +#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC)) + +/* Software ATR hash keys */ +#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define IXGBE_ATR_HASH_MASK 0x7fff +#define IXGBE_ATR_L4TYPE_MASK 0x3 +#define IXGBE_ATR_L4TYPE_UDP 0x1 +#define IXGBE_ATR_L4TYPE_TCP 0x2 +#define IXGBE_ATR_L4TYPE_SCTP 0x3 +#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +enum ixgbe_atr_flow_type { + IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, +}; + +/* Flow Director ATR input struct. 
*/ +union ixgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * src_ip - 16 bytes + * dst_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union ixgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + + +/* + * Unavailable: The FCoE Boot Option ROM is not present in the flash. + * Disabled: Present; boot order is not set for any targets on the port. + * Enabled: Present; boot order is set for at least one target on the port. + */ +enum ixgbe_fcoe_boot_status { + ixgbe_fcoe_bootstatus_disabled = 0, + ixgbe_fcoe_bootstatus_enabled = 1, + ixgbe_fcoe_bootstatus_unavailable = 0xFFFF +}; + +enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, + ixgbe_flash, + ixgbe_eeprom_none /* No NVM support */ +}; + +enum ixgbe_mac_type { + ixgbe_mac_unknown = 0, + ixgbe_mac_82598EB, + ixgbe_mac_82599EB, + ixgbe_mac_82599_vf, + ixgbe_mac_X540, + ixgbe_mac_X540_vf, + ixgbe_num_macs +}; + +enum ixgbe_phy_type { + ixgbe_phy_unknown = 0, + ixgbe_phy_none, + ixgbe_phy_tn, + ixgbe_phy_aq, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, + ixgbe_phy_nl, + ixgbe_phy_sfp_passive_tyco, + ixgbe_phy_sfp_passive_unknown, + ixgbe_phy_sfp_active_unknown, + ixgbe_phy_sfp_avago, + ixgbe_phy_sfp_ftl, + ixgbe_phy_sfp_ftl_active, + ixgbe_phy_sfp_unknown, + ixgbe_phy_sfp_intel, + ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + ixgbe_phy_generic +}; + +/* + * SFP+ module type IDs: + * + * ID Module Type + * ============= + * 0 SFP_DA_CU + * 1 SFP_SR + * 2 SFP_LR + * 3 SFP_DA_CU_CORE0 - 82599-specific + * 4 SFP_DA_CU_CORE1 - 82599-specific + * 5 SFP_SR/LR_CORE0 - 82599-specific + * 6 SFP_SR/LR_CORE1 - 82599-specific + */ +enum ixgbe_sfp_type { + ixgbe_sfp_type_da_cu = 0, + ixgbe_sfp_type_sr = 1, + ixgbe_sfp_type_lr = 2, + ixgbe_sfp_type_da_cu_core0 = 3, + ixgbe_sfp_type_da_cu_core1 = 4, + ixgbe_sfp_type_srlr_core0 = 5, + ixgbe_sfp_type_srlr_core1 = 6, + ixgbe_sfp_type_da_act_lmt_core0 = 7, + ixgbe_sfp_type_da_act_lmt_core1 = 8, + ixgbe_sfp_type_1g_cu_core0 = 9, + ixgbe_sfp_type_1g_cu_core1 = 10, + ixgbe_sfp_type_not_present = 0xFFFE, + ixgbe_sfp_type_unknown = 0xFFFF +}; + +enum ixgbe_media_type { + ixgbe_media_type_unknown = 0, + ixgbe_media_type_fiber, + ixgbe_media_type_copper, + ixgbe_media_type_backplane, + ixgbe_media_type_cx4, + ixgbe_media_type_virtual +}; + +/* Flow Control Settings */ +enum ixgbe_fc_mode { + ixgbe_fc_none = 0, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full, + ixgbe_fc_default +}; + +/* Smart Speed Settings */ +#define IXGBE_SMARTSPEED_MAX_RETRIES 3 +enum ixgbe_smart_speed { + ixgbe_smart_speed_auto = 0, + ixgbe_smart_speed_on, + ixgbe_smart_speed_off +}; + +/* PCI bus types */ +enum ixgbe_bus_type { + ixgbe_bus_type_unknown = 0, + ixgbe_bus_type_pci, + ixgbe_bus_type_pcix, + ixgbe_bus_type_pci_express, + ixgbe_bus_type_reserved +}; + +/* PCI bus speeds */ +enum ixgbe_bus_speed { + ixgbe_bus_speed_unknown = 0, + ixgbe_bus_speed_33 = 33, + ixgbe_bus_speed_66 = 
66, + ixgbe_bus_speed_100 = 100, + ixgbe_bus_speed_120 = 120, + ixgbe_bus_speed_133 = 133, + ixgbe_bus_speed_2500 = 2500, + ixgbe_bus_speed_5000 = 5000, + ixgbe_bus_speed_reserved +}; + +/* PCI bus widths */ +enum ixgbe_bus_width { + ixgbe_bus_width_unknown = 0, + ixgbe_bus_width_pcie_x1 = 1, + ixgbe_bus_width_pcie_x2 = 2, + ixgbe_bus_width_pcie_x4 = 4, + ixgbe_bus_width_pcie_x8 = 8, + ixgbe_bus_width_32 = 32, + ixgbe_bus_width_64 = 64, + ixgbe_bus_width_reserved +}; + +struct ixgbe_addr_filter_info { + u32 num_mc_addrs; + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; + bool user_set_promisc; +}; + +/* Bus parameters */ +struct ixgbe_bus_info { + enum ixgbe_bus_speed speed; + enum ixgbe_bus_width width; + enum ixgbe_bus_type type; + + u16 func; + u16 lan_id; +}; + +/* Flow control parameters */ +struct ixgbe_fc_info { + u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */ + u32 low_water; /* Flow Control Low-water */ + u16 pause_time; /* Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + bool disable_fc_autoneg; /* Do not autonegotiate FC */ + bool fc_was_autonegged; /* Is current_mode the result of autonegging? */ + enum ixgbe_fc_mode current_mode; /* FC mode in effect */ + enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +/* Statistics counters collected by the MAC */ +struct ixgbe_hw_stats { + u64 crcerrs; + u64 illerrc; + u64 errbc; + u64 mspdc; + u64 mpctotal; + u64 mpc[8]; + u64 mlfc; + u64 mrfc; + u64 rlec; + u64 lxontxc; + u64 lxonrxc; + u64 lxofftxc; + u64 lxoffrxc; + u64 pxontxc[8]; + u64 pxonrxc[8]; + u64 pxofftxc[8]; + u64 pxoffrxc[8]; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc[8]; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mngprc; + u64 mngpdc; + u64 mngptc; + u64 tor; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 xec; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; + u64 qbtc[16]; + u64 qprdc[16]; + u64 pxon2offc[8]; + u64 fdirustat_add; + u64 fdirustat_remove; + u64 fdirfstat_fadd; + u64 fdirfstat_fremove; + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; + u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; + u64 fcoedwrc; + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; + u64 ldpcec; + u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; + u64 o2bspc; +}; + +/* forward declaration */ +struct ixgbe_hw; + +/* iterator type for walking multicast address lists */ +typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, + u32 *vmdq); + +/* Function pointer table */ +struct ixgbe_eeprom_operations { + s32 (*init_params)(struct ixgbe_hw *); + s32 (*read)(struct ixgbe_hw *, u16, u16 *); + s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*write)(struct ixgbe_hw *, u16, u16); + s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); + u16 (*calc_checksum)(struct ixgbe_hw *); +}; + +struct ixgbe_mac_operations { + s32 (*init_hw)(struct ixgbe_hw *); + s32 (*reset_hw)(struct ixgbe_hw *); + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + void (*enable_relaxed_ordering)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); + u32 
(*get_supported_physical_layer)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); + s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); + s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + void (*set_lan_id)(struct ixgbe_hw *); + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); + s32 (*setup_sfp)(struct ixgbe_hw *); + s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); + s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); + void (*release_swfw_sync)(struct ixgbe_hw *, u16); + + /* Link */ + void (*disable_tx_laser)(struct ixgbe_hw *); + void (*enable_tx_laser)(struct ixgbe_hw *); + void (*flap_tx_laser)(struct ixgbe_hw *); + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); + + /* Packet Buffer manipulation */ + void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct ixgbe_hw *, u32); + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); + s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); + s32 (*clear_rar)(struct ixgbe_hw *, u32); + s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32); + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ixgbe_hw *); + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr); + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, + ixgbe_mc_addr_itr, bool clear); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); + s32 (*init_uta_tables)(struct ixgbe_hw *); + void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); + + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *, s32); + + /* Manageability interface */ + s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); +}; + +struct ixgbe_phy_operations { + s32 (*identify)(struct ixgbe_hw *); + s32 (*identify_sfp)(struct ixgbe_hw *); + s32 (*init)(struct ixgbe_hw *); + s32 (*reset)(struct ixgbe_hw *); + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); + s32 (*setup_link)(struct ixgbe_hw *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, + bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); + void (*i2c_bus_clear)(struct ixgbe_hw *); + s32 (*check_overtemp)(struct ixgbe_hw *); +}; + +struct ixgbe_eeprom_info { + struct ixgbe_eeprom_operations ops; + enum ixgbe_eeprom_type type; + u32 semaphore_delay; + u16 word_size; + u16 address_bits; + u16 
word_page_size; +}; + +#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 +struct ixgbe_mac_info { + struct ixgbe_mac_operations ops; + enum ixgbe_mac_type type; + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ + u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ + u16 wwpn_prefix; +#define IXGBE_MAX_MTA 128 + u32 mta_shadow[IXGBE_MAX_MTA]; + s32 mc_filter_type; + u32 mcft_size; + u32 vft_size; + u32 num_rar_entries; + u32 rar_highwater; + u32 rx_pb_size; + u32 max_tx_queues; + u32 max_rx_queues; + u32 max_msix_vectors; + bool msix_vectors_from_pcie; + u32 orig_autoc; + bool arc_subsystem_valid; + u32 orig_autoc2; + bool orig_link_settings_stored; + bool autotry_restart; + u8 flags; +}; + +struct ixgbe_phy_info { + struct ixgbe_phy_operations ops; + enum ixgbe_phy_type type; + u32 addr; + u32 id; + enum ixgbe_sfp_type sfp_type; + bool sfp_setup_needed; + u32 revision; + enum ixgbe_media_type media_type; + bool reset_disable; + ixgbe_autoneg_advertised autoneg_advertised; + enum ixgbe_smart_speed smart_speed; + bool smart_speed_active; + bool multispeed_fiber; + bool reset_if_overtemp; +}; + +#include "ixgbe_mbx.h" + +struct ixgbe_mbx_operations { + void (*init_params)(struct ixgbe_hw *hw); + s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct ixgbe_hw *, u16); + s32 (*check_for_ack)(struct ixgbe_hw *, u16); + s32 (*check_for_rst)(struct ixgbe_hw *, u16); +}; + +struct ixgbe_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct ixgbe_mbx_info { + struct ixgbe_mbx_operations ops; + struct ixgbe_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u32 v2p_mailbox; + u16 size; +}; + +struct ixgbe_hw { + u8 *hw_addr; + void *back; + struct ixgbe_mac_info mac; + struct ixgbe_addr_filter_info addr_ctrl; + struct ixgbe_fc_info fc; + struct ixgbe_phy_info phy; + struct ixgbe_eeprom_info eeprom; + struct ixgbe_bus_info bus; + struct ixgbe_mbx_info mbx; + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + bool adapter_stopped; + bool force_full_reset; +}; + +#define ixgbe_call_func(hw, func, params, error) \ + (func != NULL) ? 
func params : error + + +/* Error Codes */ +#define IXGBE_SUCCESS 0 +#define IXGBE_ERR_EEPROM -1 +#define IXGBE_ERR_EEPROM_CHECKSUM -2 +#define IXGBE_ERR_PHY -3 +#define IXGBE_ERR_CONFIG -4 +#define IXGBE_ERR_PARAM -5 +#define IXGBE_ERR_MAC_TYPE -6 +#define IXGBE_ERR_UNKNOWN_PHY -7 +#define IXGBE_ERR_LINK_SETUP -8 +#define IXGBE_ERR_ADAPTER_STOPPED -9 +#define IXGBE_ERR_INVALID_MAC_ADDR -10 +#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +#define IXGBE_ERR_RESET_FAILED -15 +#define IXGBE_ERR_SWFW_SYNC -16 +#define IXGBE_ERR_PHY_ADDR_INVALID -17 +#define IXGBE_ERR_I2C -18 +#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 +#define IXGBE_ERR_SFP_NOT_PRESENT -20 +#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 +#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 +#define IXGBE_ERR_FDIR_REINIT_FAILED -23 +#define IXGBE_ERR_EEPROM_VERSION -24 +#define IXGBE_ERR_NO_SPACE -25 +#define IXGBE_ERR_OVERTEMP -26 +#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +#define IXGBE_ERR_FLOW_CONTROL -29 +#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 +#define IXGBE_ERR_PBA_SECTION -31 +#define IXGBE_ERR_INVALID_ARGUMENT -32 +#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +#define IXGBE_ERR_OUT_OF_MEM -34 + +#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF + + +#endif /* _IXGBE_TYPE_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c new file mode 100644 index 0000000000..422c5c843a --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c @@ -0,0 +1,524 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + + +#include "ixgbe_api.h" +#include "ixgbe_type.h" +#include "ixgbe_vf.h" + +s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw); +s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw); +s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw); +s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw); +u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw); +s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr); +s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete); +s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool autoneg_wait_to_complete); +s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); +s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr); +s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr, + bool clear); +s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); + +#ifndef IXGBE_VFWRITE_REG +#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG +#endif +#ifndef IXGBE_VFREAD_REG +#define IXGBE_VFREAD_REG IXGBE_READ_REG +#endif + +/** + * ixgbe_init_ops_vf - Initialize the pointers for vf + * @hw: pointer to hardware structure + * + * This will assign function pointers, adapter-specific functions can + * override the assignment of generic function pointers by assigning + * their own adapter-specific function pointers. + * Does not touch the hardware. + **/ +s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw) +{ + /* MAC */ + hw->mac.ops.init_hw = ixgbe_init_hw_vf; + hw->mac.ops.reset_hw = ixgbe_reset_hw_vf; + hw->mac.ops.start_hw = ixgbe_start_hw_vf; + /* Cannot clear stats on VF */ + hw->mac.ops.clear_hw_cntrs = NULL; + hw->mac.ops.get_media_type = NULL; + hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf; + hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf; + hw->mac.ops.get_bus_info = NULL; + + /* Link */ + hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf; + hw->mac.ops.check_link = ixgbe_check_mac_link_vf; + hw->mac.ops.get_link_capabilities = NULL; + + /* RAR, Multicast, VLAN */ + hw->mac.ops.set_rar = ixgbe_set_rar_vf; + hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf; + hw->mac.ops.init_rx_addrs = NULL; + hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf; + hw->mac.ops.enable_mc = NULL; + hw->mac.ops.disable_mc = NULL; + hw->mac.ops.clear_vfta = NULL; + hw->mac.ops.set_vfta = ixgbe_set_vfta_vf; + + hw->mac.max_tx_queues = 1; + hw->mac.max_rx_queues = 1; + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware by filling the bus info structure and media type, clears + * all on chip counters, initializes receive address registers, multicast + * table, VLAN filter table, calls routine to set up link and flow control + * settings, and leaves transmit and receive units disabled and uninitialized + **/ +s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw) +{ + /* Clear adapter stopped flag */ + hw->adapter_stopped = FALSE; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_init_hw_vf - virtual function hardware initialization + * @hw: pointer to hardware structure + * + * Initialize the hardware by resetting the hardware and then starting + * 
the hardware + **/ +s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw) +{ + s32 status = hw->mac.ops.start_hw(hw); + + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + + return status; +} + +/** + * ixgbe_reset_hw_vf - Performs hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by reseting the transmit and receive units, masks and + * clears all interrupts. + **/ +s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 timeout = IXGBE_VF_INIT_TIMEOUT; + s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR; + u32 ctrl, msgbuf[IXGBE_VF_PERMADDR_MSG_LEN]; + u8 *addr = (u8 *)(&msgbuf[1]); + + DEBUGFUNC("ixgbevf_reset_hw_vf"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + hw->mac.ops.stop_adapter(hw); + + DEBUGOUT("Issuing a function level reset to MAC\n"); + + ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST; + IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + msec_delay(50); + + /* we cannot reset while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw, 0) && timeout) { + timeout--; + usec_delay(5); + } + + if (timeout) { + /* mailbox timeout can now become active */ + mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT; + + msgbuf[0] = IXGBE_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1, 0); + + msec_delay(10); + + /* + * set our "perm_addr" based on info provided by PF + * also set up the mc_filter_type which is piggy backed + * on the mac address in word 3 + */ + ret_val = mbx->ops.read_posted(hw, msgbuf, + IXGBE_VF_PERMADDR_MSG_LEN, 0); + if (!ret_val) { + if (msgbuf[0] == (IXGBE_VF_RESET | + IXGBE_VT_MSGTYPE_ACK)) { + memcpy(hw->mac.perm_addr, addr, + IXGBE_ETH_LENGTH_OF_ADDRESS); + hw->mac.mc_filter_type = + msgbuf[IXGBE_VF_MC_TYPE_WORD]; + } else { + ret_val = IXGBE_ERR_INVALID_MAC_ADDR; + } + } + } + + return ret_val; +} + +/** + * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units + * @hw: pointer to hardware structure + * + * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, + * disables transmit and receive units. The adapter_stopped flag is used by + * the shared code and drivers to determine if the adapter is in a stopped + * state and should not touch the hardware. + **/ +s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw) +{ + u32 reg_val; + u16 i; + + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware + */ + hw->adapter_stopped = TRUE; + + /* Clear interrupt mask to stop from interrupts being generated */ + IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); + + /* Clear any pending interrupts, flush previous writes */ + IXGBE_VFREAD_REG(hw, IXGBE_VTEICR); + + /* Disable the transmit unit. Each queue must be disabled. */ + for (i = 0; i < hw->mac.max_tx_queues; i++) + IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH); + + /* Disable the receive unit by stopping each queue */ + for (i = 0; i < hw->mac.max_rx_queues; i++) { + reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i)); + reg_val &= ~IXGBE_RXDCTL_ENABLE; + IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val); + } + + /* flush all queues disables */ + IXGBE_WRITE_FLUSH(hw); + msec_delay(2); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_mta_vector - Determines bit-vector in multicast table to set + * @hw: pointer to hardware structure + * @mc_addr: the multicast address + * + * Extracts the 12 bits, from a multicast address, to determine which + * bit-vector to set in the multicast table. 
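+ * As a worked illustration (assuming the default mc_filter_type of 0,
+ * i.e. address bits [47:36], as described below): the multicast address
+ * 01:00:5E:00:00:01 has mc_addr[4] = 0x00 and mc_addr[5] = 0x01, so
+ *
+ *	vector = (0x00 >> 4) | ((u16)0x01 << 4) = 0x010
+ *
+ * and 0x010 is the hash value that ixgbe_update_mc_addr_list_vf() hands
+ * to the PF for that address.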
The hardware uses 12 bits, from + * incoming rx multicast addresses, to determine the bit-vector to check in + * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. + **/ +static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) +{ + u32 vector = 0; + + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); + break; + case 1: /* use bits [46:35] of the address */ + vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); + break; + case 2: /* use bits [45:34] of the address */ + vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); + break; + case 3: /* use bits [43:32] of the address */ + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ + DEBUGOUT("MC filter type param set incorrectly\n"); + ASSERT(0); + break; + } + + /* vector can only be 12-bits or boundary will be exceeded */ + vector &= 0xFFF; + return vector; +} + +/** + * ixgbe_set_rar_vf - set device MAC address + * @hw: pointer to hardware structure + * @index: Receive address register to write + * @addr: Address to put into receive address register + * @vmdq: VMDq "set" or "pool" index + * @enable_addr: set flag that address is active + **/ +s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + UNREFERENCED_3PARAMETER(vmdq, enable_addr, index); + + memset(msgbuf, 0, 12); + msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, 6); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) + ixgbe_get_mac_addr_vf(hw, hw->mac.addr); + + return ret_val; +} + +/** + * ixgbe_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @next: caller supplied function to return next address in list + * + * Updates the Multicast Table Array. + **/ +s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count, ixgbe_mc_addr_itr next, + bool clear) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; + u16 *vector_list = (u16 *)&msgbuf[1]; + u32 vector; + u32 cnt, i; + u32 vmdq; + + UNREFERENCED_1PARAMETER(clear); + + DEBUGFUNC("ixgbe_update_mc_addr_list_vf"); + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. + * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. + */ + + DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count); + + cnt = (mc_addr_count > 30) ? 
30 : mc_addr_count; + msgbuf[0] = IXGBE_VF_SET_MULTICAST; + msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; + + for (i = 0; i < cnt; i++) { + vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq)); + DEBUGOUT1("Hash value = 0x%03X\n", vector); + vector_list[i] = (u16)vector; + } + + return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0); +} + +/** + * ixgbe_set_vfta_vf - Set/Unset vlan filter table address + * @hw: pointer to the HW structure + * @vlan: 12 bit VLAN ID + * @vind: unused by VF drivers + * @vlan_on: if TRUE then set bit, else clear bit + **/ +s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + UNREFERENCED_1PARAMETER(vind); + + msgbuf[0] = IXGBE_VF_SET_VLAN; + msgbuf[1] = vlan; + /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */ + msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT; + + return(mbx->ops.write_posted(hw, msgbuf, 2, 0)); +} + +/** + * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues + * @hw: pointer to hardware structure + * + * Returns the number of transmit queues for the given adapter. + **/ +u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return IXGBE_VF_MAX_TX_QUEUES; +} + +/** + * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues + * @hw: pointer to hardware structure + * + * Returns the number of receive queues for the given adapter. + **/ +u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return IXGBE_VF_MAX_RX_QUEUES; +} + +/** + * ixgbe_get_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + **/ +s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) +{ + int i; + + for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++) + mac_addr[i] = hw->mac.perm_addr[i]; + + return IXGBE_SUCCESS; +} + +s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) +{ + struct ixgbe_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, sizeof(msgbuf)); + /* + * If index is one then this is the start of a new list and needs + * indication to the PF so it can do it's own list management. + * If it is zero then that tells the PF to just clear all of + * this VF's macvlans and there is no new list. + */ + msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; + msgbuf[0] |= IXGBE_VF_SET_MACVLAN; + if (addr) + memcpy(msg_addr, addr, 6); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0); + + msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS; + + if (!ret_val) + if (msgbuf[0] == + (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK)) + ret_val = IXGBE_ERR_OUT_OF_MEM; + + return ret_val; +} + +/** + * ixgbe_setup_mac_link_vf - Setup MAC link settings + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * + * Set the link speed in the AUTOC register and restarts link. 
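+ * A minimal caller sketch, going through the ops assigned by
+ * ixgbe_init_ops_vf() (note that the body below simply returns
+ * IXGBE_SUCCESS, so on the VF this is effectively a no-op):
+ *
+ *	ixgbe_link_speed speed;
+ *	bool link_up;
+ *
+ *	hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL, TRUE, FALSE);
+ *	hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);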
+ **/ +s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + UNREFERENCED_4PARAMETER(hw, speed, autoneg, autoneg_wait_to_complete); + return IXGBE_SUCCESS; +} + +/** + * ixgbe_check_mac_link_vf - Get link/speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: TRUE is link is up, FALSE otherwise + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + * + * Reads the links register to determine if link is up and the current speed + **/ +s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up, bool autoneg_wait_to_complete) +{ + u32 links_reg; + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + if (!(hw->mbx.ops.check_for_rst(hw, 0))) { + *link_up = FALSE; + *speed = 0; + return -1; + } + + links_reg = IXGBE_VFREAD_REG(hw, IXGBE_VFLINKS); + + if (links_reg & IXGBE_LINKS_UP) + *link_up = TRUE; + else + *link_up = FALSE; + + if ((links_reg & IXGBE_LINKS_SPEED_10G_82599) == + IXGBE_LINKS_SPEED_10G_82599) + *speed = IXGBE_LINK_SPEED_10GB_FULL; + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + + return IXGBE_SUCCESS; +} + diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h new file mode 100644 index 0000000000..d0c4b34d20 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h @@ -0,0 +1,113 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef __IXGBE_VF_H__ +#define __IXGBE_VF_H__ + +#define IXGBE_VF_IRQ_CLEAR_MASK 7 +#define IXGBE_VF_MAX_TX_QUEUES 8 +#define IXGBE_VF_MAX_RX_QUEUES 8 + +#define IXGBE_VFCTRL 0x00000 +#define IXGBE_VFSTATUS 0x00008 +#define IXGBE_VFLINKS 0x00010 +#define IXGBE_VFFRTIMER 0x00048 +#define IXGBE_VFRXMEMWRAP 0x03190 +#define IXGBE_VTEICR 0x00100 +#define IXGBE_VTEICS 0x00104 +#define IXGBE_VTEIMS 0x00108 +#define IXGBE_VTEIMC 0x0010C +#define IXGBE_VTEIAC 0x00110 +#define IXGBE_VTEIAM 0x00114 +#define IXGBE_VTEITR(x) (0x00820 + (4 * x)) +#define IXGBE_VTIVAR(x) (0x00120 + (4 * x)) +#define IXGBE_VTIVAR_MISC 0x00140 +#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x)) +/* define IXGBE_VFPBACL still says TBD in EAS */ +#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x)) +#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x)) +#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x)) +#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x)) +#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x)) +#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x)) +#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x)) +#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x)) +#define IXGBE_VFPSRTYPE 0x00300 +#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x)) +#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x)) +#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x)) +#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x)) +#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x)) +#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x)) +#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x)) +#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x)) +#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x)) +#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x)) +#define IXGBE_VFGPRC 0x0101C +#define IXGBE_VFGPTC 0x0201C +#define IXGBE_VFGORC_LSB 0x01020 +#define IXGBE_VFGORC_MSB 0x01024 +#define IXGBE_VFGOTC_LSB 0x02020 +#define IXGBE_VFGOTC_MSB 0x02024 +#define IXGBE_VFMPRC 0x01034 + + +struct ixgbevf_hw_stats { + u64 base_vfgprc; + u64 base_vfgptc; + u64 base_vfgorc; + u64 base_vfgotc; + u64 base_vfmprc; + + u64 last_vfgprc; + u64 last_vfgptc; + u64 last_vfgorc; + u64 last_vfgotc; + u64 last_vfmprc; + + u64 vfgprc; + u64 vfgptc; + u64 vfgorc; + u64 vfgotc; + u64 vfmprc; + + u64 saved_reset_vfgprc; + u64 saved_reset_vfgptc; + u64 saved_reset_vfgorc; + u64 saved_reset_vfgotc; + u64 saved_reset_vfmprc; +}; + +#endif /* __IXGBE_VF_H__ */ + diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c new file mode 100644 index 0000000000..d6fdcc46ab --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c @@ -0,0 +1,989 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#include "ixgbe_x540.h" +#include "ixgbe_type.h" +#include "ixgbe_api.h" +#include "ixgbe_common.h" +#include "ixgbe_phy.h" + +s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw); +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg, bool link_up_wait_to_complete); +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); +u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw); + +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data); +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data); +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data); +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw); +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val); +u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw); + +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); + +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); + +/** + * ixgbe_init_ops_X540 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * + * Initialize the function pointers and assign the MAC type for 82599. + * Does not touch the hardware. 
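+ * As a rough usage sketch: once ixgbe_init_shared_code() has identified
+ * an X540 MAC, this routine fills in the tables so that generic calls
+ * such as
+ *
+ *	hw->mac.ops.reset_hw(hw);
+ *	hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &word);
+ *
+ * (with word being a caller-supplied u16) dispatch to the
+ * ixgbe_reset_hw_X540() and ixgbe_read_eerd_X540() implementations below.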
+ **/ +s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val; + + DEBUGFUNC("ixgbe_init_ops_X540"); + + ret_val = ixgbe_init_phy_ops_generic(hw); + ret_val = ixgbe_init_ops_generic(hw); + + + /* EEPROM */ + eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540; + eeprom->ops.read = &ixgbe_read_eerd_X540; + eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540; + eeprom->ops.write = &ixgbe_write_eewr_X540; + eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540; + eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540; + eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540; + eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540; + + /* PHY */ + phy->ops.init = &ixgbe_init_phy_ops_generic; + phy->ops.reset = NULL; + + /* MAC */ + mac->ops.reset_hw = &ixgbe_reset_hw_X540; + mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2; + mac->ops.get_media_type = &ixgbe_get_media_type_X540; + mac->ops.get_supported_physical_layer = + &ixgbe_get_supported_physical_layer_X540; + mac->ops.read_analog_reg8 = NULL; + mac->ops.write_analog_reg8 = NULL; + mac->ops.start_hw = &ixgbe_start_hw_X540; + mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic; + mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic; + mac->ops.get_device_caps = &ixgbe_get_device_caps_generic; + mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic; + mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic; + mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540; + mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540; + + /* RAR, Multicast, VLAN */ + mac->ops.set_vmdq = &ixgbe_set_vmdq_generic; + mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic; + mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic; + mac->rar_highwater = 1; + mac->ops.set_vfta = &ixgbe_set_vfta_generic; + mac->ops.clear_vfta = &ixgbe_clear_vfta_generic; + mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic; + mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing; + mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing; + + /* Link */ + mac->ops.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic; + mac->ops.setup_link = &ixgbe_setup_mac_link_X540; + mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic; + mac->ops.check_link = &ixgbe_check_mac_link_generic; + + mac->mcft_size = 128; + mac->vft_size = 128; + mac->num_rar_entries = 128; + mac->rx_pb_size = 384; + mac->max_tx_queues = 128; + mac->max_rx_queues = 128; + mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + + /* + * FWSM register + * ARC supported; valid only if manageability features are + * enabled. + */ + mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) & + IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE; + + hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; + + /* LEDs */ + mac->ops.blink_led_start = ixgbe_blink_led_start_X540; + mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; + + /* Manageability interface */ + mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic; + + return ret_val; +} + +/** + * ixgbe_get_link_capabilities_X540 - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @negotiation: TRUE when autoneg or autotry is enabled + * + * Determines the link capabilities by reading the AUTOC register. 
+ **/ +s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *negotiation) +{ + ixgbe_get_copper_link_capabilities_generic(hw, speed, negotiation); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_get_media_type_X540 - Get media type + * @hw: pointer to hardware structure + * + * Returns the media type (fiber, copper, backplane) + **/ +enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) +{ + UNREFERENCED_1PARAMETER(hw); + return ixgbe_media_type_copper; +} + +/** + * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg: TRUE if autonegotiation enabled + * @autoneg_wait_to_complete: TRUE when waiting for completion is needed + **/ +s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + ixgbe_link_speed speed, bool autoneg, + bool autoneg_wait_to_complete) +{ + DEBUGFUNC("ixgbe_setup_mac_link_X540"); + return hw->phy.ops.setup_link_speed(hw, speed, autoneg, + autoneg_wait_to_complete); +} + +/** + * ixgbe_reset_hw_X540 - Perform hardware reset + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks + * and clears all interrupts, and perform a reset. + **/ +s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) +{ + s32 status; + u32 ctrl, i; + + DEBUGFUNC("ixgbe_reset_hw_X540"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); + if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + +mac_reset_top: + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { + usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; + } + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; + DEBUGOUT("Reset polling failed to complete.\n"); + } + msec_delay(100); + + /* + * Double resets are required for recovery from certain error + * conditions. Between resets, it is necessary to stall to allow time + * for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + goto mac_reset_top; + } + + /* Set the Rx packet buffer size. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT); + + /* Store the permanent mac address */ + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); + + /* + * Store MAC address from RAR0, clear receive address registers, and + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. 
+ */ + hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ + if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, + &hw->mac.wwpn_prefix); + +reset_hw_out: + return status; +} + +/** + * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx + * @hw: pointer to hardware structure + * + * Starts the hardware using the generic start_hw function + * and the generation start_hw function. + * Then performs revision-specific operations, if any. + **/ +s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) +{ + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_start_hw_X540"); + + ret_val = ixgbe_start_hw_generic(hw); + if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = ixgbe_start_hw_gen2(hw); + +out: + return ret_val; +} + +/** + * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type + * @hw: pointer to hardware structure + * + * Determines physical layer capabilities of the current configuration. + **/ +u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) +{ + u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + + DEBUGFUNC("ixgbe_get_supported_physical_layer_X540"); + + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); + if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; + if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; + if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + + return physical_layer; +} + +/** + * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params + * @hw: pointer to hardware structure + * + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. + **/ +s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) +{ + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + + DEBUGFUNC("ixgbe_init_eeprom_params_X540"); + + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + + eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + + DEBUGOUT2("Eeprom params: type = %d, size = %d\n", + eeprom->type, eeprom->word_size); + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_read_eerd_X540- Read EEPROM word using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
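+ * A small caller sketch:
+ *
+ *	u16 word;
+ *
+ *	if (hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &word) ==
+ *	    IXGBE_SUCCESS)
+ *		DEBUGOUT1("EEPROM checksum word = 0x%x\n", word);
+ *
+ * The IXGBE_GSSR_EEP_SM software/firmware semaphore is acquired and
+ * released internally, so the caller does not take it itself.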
+ **/ +s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_eerd_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) + status = ixgbe_read_eerd_generic(hw, offset, data); + else + status = IXGBE_ERR_SWFW_SYNC; + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words + * @data: word(s) read from the EEPROM + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. + **/ +s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_read_eerd_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) + status = ixgbe_read_eerd_buffer_generic(hw, offset, + words, data); + else + status = IXGBE_ERR_SWFW_SYNC; + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @data: word write to the EEPROM + * + * Write a 16 bit word to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_eewr_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) + status = ixgbe_write_eewr_generic(hw, offset, data); + else + status = IXGBE_ERR_SWFW_SYNC; + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to write + * @words: number of words + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. + **/ +s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + u16 offset, u16 words, u16 *data) +{ + s32 status = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_write_eewr_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) + status = ixgbe_write_eewr_buffer_generic(hw, offset, + words, data); + else + status = IXGBE_ERR_SWFW_SYNC; + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; +} + +/** + * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum + * + * This function does not use synchronization for EERD and EEWR. It can + * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. + * + * @hw: pointer to hardware structure + **/ +u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + u16 i; + u16 j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; + + /* + * Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores here. Instead use + * ixgbe_read_eerd_generic + */ + + DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); + + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { + if (ixgbe_read_eerd_generic(hw, i, &word) != IXGBE_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + break; + } + checksum += word; + } + + /* + * Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
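+	 * The stored checksum word (offset IXGBE_EEPROM_CHECKSUM) is chosen
+	 * so that the 16-bit sum of every covered word plus the checksum
+	 * itself equals IXGBE_EEPROM_SUM; as a sketch,
+	 *
+	 *	sum(covered data words) + sum(pointed-to sections) + checksum
+	 *		== IXGBE_EEPROM_SUM	(mod 0x10000)
+	 *
+	 * which is why this function returns IXGBE_EEPROM_SUM minus the
+	 * running sum accumulated here.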
+ */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + + if (ixgbe_read_eerd_generic(hw, i, &pointer) != IXGBE_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + break; + } + + /* Skip pointer section if the pointer is invalid. */ + if (pointer == 0xFFFF || pointer == 0 || + pointer >= hw->eeprom.word_size) + continue; + + if (ixgbe_read_eerd_generic(hw, pointer, &length)!= + IXGBE_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + break; + } + + /* Skip pointer section if length is invalid. */ + if (length == 0xFFFF || length == 0 || + (pointer + length) >= hw->eeprom.word_size) + continue; + + for (j = pointer+1; j <= pointer+length; j++) { + if (ixgbe_read_eerd_generic(hw, j, &word) != + IXGBE_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + break; + } + checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + + return checksum; +} + +/** + * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum + * @hw: pointer to hardware structure + * @checksum_val: calculated checksum + * + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, + u16 *checksum_val) +{ + s32 status; + u16 checksum; + u16 read_checksum = 0; + + DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + goto out; + } + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + checksum = hw->eeprom.ops.calc_checksum(hw); + + /* + * Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores twice here. + */ + ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, + &read_checksum); + + /* + * Verify read checksum from EEPROM is the same as + * calculated checksum + */ + if (read_checksum != checksum) + status = IXGBE_ERR_EEPROM_CHECKSUM; + + /* If the user cares, return the calculated checksum */ + if (checksum_val) + *checksum_val = checksum; + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); +out: + return status; +} + +/** + * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash + * @hw: pointer to hardware structure + * + * After writing EEPROM to shadow RAM using EEWR register, software calculates + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) +{ + s32 status; + u16 checksum; + + DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); + + if (status != IXGBE_SUCCESS) + DEBUGOUT("EEPROM read failed\n"); + + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == + IXGBE_SUCCESS) { + checksum = hw->eeprom.ops.calc_checksum(hw); + + /* + * Do not use hw->eeprom.ops.write because we do not want to + * take the synchronization semaphores twice here. 
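+	 * A typical caller sequence is therefore, as a sketch,
+	 *
+	 *	hw->eeprom.ops.write(hw, offset, new_word);
+	 *	hw->eeprom.ops.update_checksum(hw);
+	 *
+	 * (offset/new_word being whatever the caller changed), with this
+	 * function recomputing the checksum, writing it through EEWR and
+	 * then kicking off the flash update below.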
+ */ + status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, + checksum); + + if (status == IXGBE_SUCCESS) + status = ixgbe_update_flash_X540(hw); + else + status = IXGBE_ERR_SWFW_SYNC; + } + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; +} + +/** + * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device + * @hw: pointer to hardware structure + * + * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy + * EEPROM from shadow RAM to the flash device. + **/ +static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) +{ + u32 flup; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_update_flash_X540"); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_ERR_EEPROM) { + DEBUGOUT("Flash update time out\n"); + goto out; + } + + flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + + if (hw->revision_id == 0) { + flup = IXGBE_READ_REG(hw, IXGBE_EEC); + + if (flup & IXGBE_EEC_SEC1VAL) { + flup |= IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + } + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + } +out: + return status; +} + +/** + * ixgbe_poll_flash_update_done_X540 - Poll flash update status + * @hw: pointer to hardware structure + * + * Polls the FLUDONE (bit 26) of the EEC Register to determine when the + * flash update is done. + **/ +static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) +{ + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + + DEBUGFUNC("ixgbe_poll_flash_update_done_X540"); + + for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_EEC); + if (reg & IXGBE_EEC_FLUDONE) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(5); + } + return status; +} + +/** + * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to acquire + * + * Acquires the SWFW semaphore thought the SW_FW_SYNC register for + * the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 5; + u32 hwmask = 0; + u32 timeout = 200; + u32 i; + s32 ret_val = IXGBE_SUCCESS; + + DEBUGFUNC("ixgbe_acquire_swfw_sync_X540"); + + if (swmask == IXGBE_GSSR_EEP_SM) + hwmask = IXGBE_GSSR_FLASH_SM; + + /* SW only mask doesn't have FW bit pair */ + if (swmask == IXGBE_GSSR_SW_MNG_SM) + fwmask = 0; + + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ + if (ixgbe_get_swfw_sync_semaphore(hw)) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (!(swfw_sync & (fwmask | swmask | hwmask))) { + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); + goto out; + } else { + /* + * Firmware currently using resource (fwmask), hardware currently + * using resource (hwmask), or other software thread currently + * using resource (swmask) + */ + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); + } + } + + /* Failed to get SW only semaphore */ + if (swmask == 
IXGBE_GSSR_SW_MNG_SM) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto out; + } + + /* If the resource is not released by the FW/HW the SW can assume that + * the FW/HW malfunctions. In that case the SW should sets the SW bit(s) + * of the requested resource(s) while ignoring the corresponding FW/HW + * bits in the SW_FW_SYNC register. + */ + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (swfw_sync & (fwmask| hwmask)) { + if (ixgbe_get_swfw_sync_semaphore(hw)) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); + } + +out: + return ret_val; +} + +/** + * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore + * @hw: pointer to hardware structure + * @mask: Mask to specify which semaphore to release + * + * Releases the SWFW semaphore throught the SW_FW_SYNC register + * for the specified function (CSR, PHY0, PHY1, EVM, Flash) + **/ +void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + + DEBUGFUNC("ixgbe_release_swfw_sync_X540"); + + ixgbe_get_swfw_sync_semaphore(hw); + + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swfw_sync &= ~swmask; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + + ixgbe_release_swfw_sync_semaphore(hw); + msec_delay(5); +} + +/** + * ixgbe_get_nvm_semaphore - Get hardware semaphore + * @hw: pointer to hardware structure + * + * Sets the hardware semaphores so SW/FW can gain control of shared resources + **/ +static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + + DEBUGFUNC("ixgbe_get_swfw_sync_semaphore"); + + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + if (!(swsm & IXGBE_SWSM_SMBI)) { + status = IXGBE_SUCCESS; + break; + } + usec_delay(50); + } + + /* Now get the semaphore between SW/FW through the REGSMP bit */ + if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + if (!(swsm & IXGBE_SWFW_REGSMP)) + break; + + usec_delay(50); + } + + /* + * Release semaphores and return error if SW NVM semaphore + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { + DEBUGOUT("REGSMP Software NVM semaphore not granted.\n"); + ixgbe_release_swfw_sync_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { + DEBUGOUT("Software semaphore SMBI between device drivers " + "not granted.\n"); + } + + return status; +} + +/** + * ixgbe_release_nvm_semaphore - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function clears hardware semaphore bits. + **/ +static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("ixgbe_release_swfw_sync_semaphore"); + + /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ + + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm &= ~IXGBE_SWSM_SMBI; + IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swsm &= ~IXGBE_SWFW_REGSMP; + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); + + IXGBE_WRITE_FLUSH(hw); +} + +/** + * ixgbe_blink_led_start_X540 - Blink LED based on index. 
+ * @hw: pointer to hardware structure + * @index: led number to blink + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + + DEBUGFUNC("ixgbe_blink_led_start_X540"); + + /* + * In order for the blink bit in the LED control register + * to work, link and speed must be forced in the MAC. We + * will reverse this when we stop the blinking. + */ + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + + /* Set the LED to LINK_UP + BLINK. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index. + * @hw: pointer to hardware structure + * @index: led number to stop blinking + * + * Devices that implement the version 2 interface: + * X540 + **/ +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) +{ + u32 macc_reg; + u32 ledctl_reg; + + DEBUGFUNC("ixgbe_blink_led_stop_X540"); + + /* Restore the LED to its default value. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); + ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + ledctl_reg &= ~IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + + /* Unforce link and speed in the MAC. */ + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS); + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + IXGBE_WRITE_FLUSH(hw); + + return IXGBE_SUCCESS; +} + + diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h new file mode 100644 index 0000000000..0939449b0a --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h @@ -0,0 +1,42 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + +#ifndef _IXGBE_X540_H_ +#define _IXGBE_X540_H_ + +#include "ixgbe_type.h" + +s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); +#endif /* _IXGBE_X540_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixv.c b/lib/librte_pmd_ixgbe/ixgbe/ixv.c new file mode 100644 index 0000000000..93b25bebba --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixv.c @@ -0,0 +1,4010 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. 
+ +******************************************************************************/ +/*$FreeBSD$*/ + +#ifdef HAVE_KERNEL_OPTION_HEADERS +#include "opt_inet.h" +#include "opt_inet6.h" +#endif + +#include "ixv.h" + +/********************************************************************* + * Driver version + *********************************************************************/ +char ixv_driver_version[] = "1.1.2"; + +/********************************************************************* + * PCI Device ID Table + * + * Used by probe to select devices to load on + * Last field stores an index into ixv_strings + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } + *********************************************************************/ + +static ixv_vendor_info_t ixv_vendor_info_array[] = +{ + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0}, + {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0}, + /* required last entry */ + {0, 0, 0, 0, 0} +}; + +/********************************************************************* + * Table of branding strings + *********************************************************************/ + +static char *ixv_strings[] = { + "Intel(R) PRO/10GbE Virtual Function Network Driver" +}; + +/********************************************************************* + * Function prototypes + *********************************************************************/ +static int ixv_probe(device_t); +static int ixv_attach(device_t); +static int ixv_detach(device_t); +static int ixv_shutdown(device_t); +#if __FreeBSD_version < 800000 +static void ixv_start(struct ifnet *); +static void ixv_start_locked(struct tx_ring *, struct ifnet *); +#else +static int ixv_mq_start(struct ifnet *, struct mbuf *); +static int ixv_mq_start_locked(struct ifnet *, + struct tx_ring *, struct mbuf *); +static void ixv_qflush(struct ifnet *); +#endif +static int ixv_ioctl(struct ifnet *, u_long, caddr_t); +static void ixv_init(void *); +static void ixv_init_locked(struct adapter *); +static void ixv_stop(void *); +static void ixv_media_status(struct ifnet *, struct ifmediareq *); +static int ixv_media_change(struct ifnet *); +static void ixv_identify_hardware(struct adapter *); +static int ixv_allocate_pci_resources(struct adapter *); +static int ixv_allocate_msix(struct adapter *); +static int ixv_allocate_queues(struct adapter *); +static int ixv_setup_msix(struct adapter *); +static void ixv_free_pci_resources(struct adapter *); +static void ixv_local_timer(void *); +static void ixv_setup_interface(device_t, struct adapter *); +static void ixv_config_link(struct adapter *); + +static int ixv_allocate_transmit_buffers(struct tx_ring *); +static int ixv_setup_transmit_structures(struct adapter *); +static void ixv_setup_transmit_ring(struct tx_ring *); +static void ixv_initialize_transmit_units(struct adapter *); +static void ixv_free_transmit_structures(struct adapter *); +static void ixv_free_transmit_buffers(struct tx_ring *); + +static int ixv_allocate_receive_buffers(struct rx_ring *); +static int ixv_setup_receive_structures(struct adapter *); +static int ixv_setup_receive_ring(struct rx_ring *); +static void ixv_initialize_receive_units(struct adapter *); +static void ixv_free_receive_structures(struct adapter *); +static void ixv_free_receive_buffers(struct rx_ring *); + +static void ixv_enable_intr(struct adapter *); +static void ixv_disable_intr(struct adapter *); +static bool ixv_txeof(struct tx_ring *); +static bool ixv_rxeof(struct 
ix_queue *, int); +static void ixv_rx_checksum(u32, struct mbuf *, u32); +static void ixv_set_multi(struct adapter *); +static void ixv_update_link_status(struct adapter *); +static void ixv_refresh_mbufs(struct rx_ring *, int); +static int ixv_xmit(struct tx_ring *, struct mbuf **); +static int ixv_sysctl_stats(SYSCTL_HANDLER_ARGS); +static int ixv_sysctl_debug(SYSCTL_HANDLER_ARGS); +static int ixv_set_flowcntl(SYSCTL_HANDLER_ARGS); +static int ixv_dma_malloc(struct adapter *, bus_size_t, + struct ixv_dma_alloc *, int); +static void ixv_dma_free(struct adapter *, struct ixv_dma_alloc *); +static void ixv_add_rx_process_limit(struct adapter *, const char *, + const char *, int *, int); +static bool ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *); +static bool ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *); +static void ixv_set_ivar(struct adapter *, u8, u8, s8); +static void ixv_configure_ivars(struct adapter *); +static u8 * ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *); + +static void ixv_setup_vlan_support(struct adapter *); +static void ixv_register_vlan(void *, struct ifnet *, u16); +static void ixv_unregister_vlan(void *, struct ifnet *, u16); + +static void ixv_save_stats(struct adapter *); +static void ixv_init_stats(struct adapter *); +static void ixv_update_stats(struct adapter *); + +static __inline void ixv_rx_discard(struct rx_ring *, int); +static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *, + struct mbuf *, u32); + +/* The MSI/X Interrupt handlers */ +static void ixv_msix_que(void *); +static void ixv_msix_mbx(void *); + +/* Deferred interrupt tasklets */ +static void ixv_handle_que(void *, int); +static void ixv_handle_mbx(void *, int); + +/********************************************************************* + * FreeBSD Device Interface Entry Points + *********************************************************************/ + +static device_method_t ixv_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, ixv_probe), + DEVMETHOD(device_attach, ixv_attach), + DEVMETHOD(device_detach, ixv_detach), + DEVMETHOD(device_shutdown, ixv_shutdown), + {0, 0} +}; + +static driver_t ixv_driver = { + "ix", ixv_methods, sizeof(struct adapter), +}; + +extern devclass_t ixgbe_devclass; +DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0); +MODULE_DEPEND(ixv, pci, 1, 1, 1); +MODULE_DEPEND(ixv, ether, 1, 1, 1); + +/* +** TUNEABLE PARAMETERS: +*/ + +/* +** AIM: Adaptive Interrupt Moderation +** which means that the interrupt rate +** is varied over time based on the +** traffic for that interrupt vector +*/ +static int ixv_enable_aim = FALSE; +TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim); + +/* How many packets rxeof tries to clean at a time */ +static int ixv_rx_process_limit = 128; +TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit); + +/* Flow control setting, default to full */ +static int ixv_flow_control = ixgbe_fc_full; +TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control); + +/* + * Header split: this causes the hardware to DMA + * the header into a seperate mbuf from the payload, + * it can be a performance win in some workloads, but + * in others it actually hurts, its off by default. + */ +static bool ixv_header_split = FALSE; +TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split); + +/* +** Number of TX descriptors per ring, +** setting higher than RX as this seems +** the better performing choice. 
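+**
+** As with the other hw.ixv.* knobs above, this is a loader tunable, so a
+** hypothetical /boot/loader.conf override would look like
+**
+**	hw.ixv.txd="2048"
+**
+** subject to the MIN_TXD/MAX_TXD sanity checks in ixv_attach().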
+*/ +static int ixv_txd = DEFAULT_TXD; +TUNABLE_INT("hw.ixv.txd", &ixv_txd); + +/* Number of RX descriptors per ring */ +static int ixv_rxd = DEFAULT_RXD; +TUNABLE_INT("hw.ixv.rxd", &ixv_rxd); + +/* +** Shadow VFTA table, this is needed because +** the real filter table gets cleared during +** a soft reset and we need to repopulate it. +*/ +static u32 ixv_shadow_vfta[VFTA_SIZE]; + +/********************************************************************* + * Device identification routine + * + * ixv_probe determines if the driver should be loaded on + * adapter based on PCI vendor/device id of the adapter. + * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +ixv_probe(device_t dev) +{ + ixv_vendor_info_t *ent; + + u16 pci_vendor_id = 0; + u16 pci_device_id = 0; + u16 pci_subvendor_id = 0; + u16 pci_subdevice_id = 0; + char adapter_name[256]; + + + pci_vendor_id = pci_get_vendor(dev); + if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID) + return (ENXIO); + + pci_device_id = pci_get_device(dev); + pci_subvendor_id = pci_get_subvendor(dev); + pci_subdevice_id = pci_get_subdevice(dev); + + ent = ixv_vendor_info_array; + while (ent->vendor_id != 0) { + if ((pci_vendor_id == ent->vendor_id) && + (pci_device_id == ent->device_id) && + + ((pci_subvendor_id == ent->subvendor_id) || + (ent->subvendor_id == 0)) && + + ((pci_subdevice_id == ent->subdevice_id) || + (ent->subdevice_id == 0))) { + sprintf(adapter_name, "%s, Version - %s", + ixv_strings[ent->index], + ixv_driver_version); + device_set_desc_copy(dev, adapter_name); + return (0); + } + ent++; + } + return (ENXIO); +} + +/********************************************************************* + * Device initialization routine + * + * The attach entry point is called when the driver is being loaded. + * This routine identifies the type of hardware, allocates all resources + * and initializes the hardware. 
+ * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +ixv_attach(device_t dev) +{ + struct adapter *adapter; + struct ixgbe_hw *hw; + int error = 0; + + INIT_DEBUGOUT("ixv_attach: begin"); + + /* Allocate, clear, and link in our adapter structure */ + adapter = device_get_softc(dev); + adapter->dev = adapter->osdep.dev = dev; + hw = &adapter->hw; + + /* Core Lock Init*/ + IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); + + /* SYSCTL APIs */ + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, + adapter, 0, ixv_sysctl_stats, "I", "Statistics"); + + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, + adapter, 0, ixv_sysctl_debug, "I", "Debug Info"); + + SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW, + adapter, 0, ixv_set_flowcntl, "I", "Flow Control"); + + SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), + OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW, + &ixv_enable_aim, 1, "Interrupt Moderation"); + + /* Set up the timer callout */ + callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); + + /* Determine hardware revision */ + ixv_identify_hardware(adapter); + + /* Do base PCI setup - map BAR0 */ + if (ixv_allocate_pci_resources(adapter)) { + device_printf(dev, "Allocation of PCI resources failed\n"); + error = ENXIO; + goto err_out; + } + + /* Do descriptor calc and sanity checks */ + if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 || + ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) { + device_printf(dev, "TXD config issue, using default!\n"); + adapter->num_tx_desc = DEFAULT_TXD; + } else + adapter->num_tx_desc = ixv_txd; + + if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 || + ixv_rxd < MIN_TXD || ixv_rxd > MAX_TXD) { + device_printf(dev, "RXD config issue, using default!\n"); + adapter->num_rx_desc = DEFAULT_RXD; + } else + adapter->num_rx_desc = ixv_rxd; + + /* Allocate our TX/RX Queues */ + if (ixv_allocate_queues(adapter)) { + error = ENOMEM; + goto err_out; + } + + /* + ** Initialize the shared code: its + ** at this point the mac type is set. 
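+ ** A failure here (or in ixgbe_init_hw() further down) is fatal
+ ** for attach and unwinds through err_late, releasing the rings
+ ** allocated above.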
+ */ + error = ixgbe_init_shared_code(hw); + if (error) { + device_printf(dev,"Shared Code Initialization Failure\n"); + error = EIO; + goto err_late; + } + + /* Setup the mailbox */ + ixgbe_init_mbx_params_vf(hw); + + ixgbe_reset_hw(hw); + + /* Get Hardware Flow Control setting */ + hw->fc.requested_mode = ixgbe_fc_full; + hw->fc.pause_time = IXV_FC_PAUSE; + hw->fc.low_water = IXV_FC_LO; + hw->fc.high_water[0] = IXV_FC_HI; + hw->fc.send_xon = TRUE; + + error = ixgbe_init_hw(hw); + if (error) { + device_printf(dev,"Hardware Initialization Failure\n"); + error = EIO; + goto err_late; + } + + error = ixv_allocate_msix(adapter); + if (error) + goto err_late; + + /* Setup OS specific network interface */ + ixv_setup_interface(dev, adapter); + + /* Sysctl for limiting the amount of work done in the taskqueue */ + ixv_add_rx_process_limit(adapter, "rx_processing_limit", + "max number of rx packets to process", &adapter->rx_process_limit, + ixv_rx_process_limit); + + /* Do the stats setup */ + ixv_save_stats(adapter); + ixv_init_stats(adapter); + + /* Register for VLAN events */ + adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, + ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); + adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, + ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); + + INIT_DEBUGOUT("ixv_attach: end"); + return (0); + +err_late: + ixv_free_transmit_structures(adapter); + ixv_free_receive_structures(adapter); +err_out: + ixv_free_pci_resources(adapter); + return (error); + +} + +/********************************************************************* + * Device removal routine + * + * The detach entry point is called when the driver is being removed. + * This routine stops the adapter and deallocates all the resources + * that were allocated for driver operation. 
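+ * Teardown order: refuse to detach while a vlan trunk is active,
+ * stop the adapter, drain and free the queue and mailbox
+ * taskqueues, deregister the VLAN event handlers, detach the
+ * ifnet, release the PCI resources and finally free the
+ * transmit/receive structures and the core lock.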
+ * + * return 0 on success, positive on failure + *********************************************************************/ + +static int +ixv_detach(device_t dev) +{ + struct adapter *adapter = device_get_softc(dev); + struct ix_queue *que = adapter->queues; + + INIT_DEBUGOUT("ixv_detach: begin"); + + /* Make sure VLANS are not using driver */ + if (adapter->ifp->if_vlantrunk != NULL) { + device_printf(dev,"Vlan in use, detach first\n"); + return (EBUSY); + } + + IXV_CORE_LOCK(adapter); + ixv_stop(adapter); + IXV_CORE_UNLOCK(adapter); + + for (int i = 0; i < adapter->num_queues; i++, que++) { + if (que->tq) { + taskqueue_drain(que->tq, &que->que_task); + taskqueue_free(que->tq); + } + } + + /* Drain the Link queue */ + if (adapter->tq) { + taskqueue_drain(adapter->tq, &adapter->mbx_task); + taskqueue_free(adapter->tq); + } + + /* Unregister VLAN events */ + if (adapter->vlan_attach != NULL) + EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); + if (adapter->vlan_detach != NULL) + EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); + + ether_ifdetach(adapter->ifp); + callout_drain(&adapter->timer); + ixv_free_pci_resources(adapter); + bus_generic_detach(dev); + if_free(adapter->ifp); + + ixv_free_transmit_structures(adapter); + ixv_free_receive_structures(adapter); + + IXV_CORE_LOCK_DESTROY(adapter); + return (0); +} + +/********************************************************************* + * + * Shutdown entry point + * + **********************************************************************/ +static int +ixv_shutdown(device_t dev) +{ + struct adapter *adapter = device_get_softc(dev); + IXV_CORE_LOCK(adapter); + ixv_stop(adapter); + IXV_CORE_UNLOCK(adapter); + return (0); +} + +#if __FreeBSD_version < 800000 +/********************************************************************* + * Transmit entry point + * + * ixv_start is called by the stack to initiate a transmit. + * The driver will remain in this routine as long as there are + * packets to transmit and transmit resources are available. + * In case resources are not available stack is notified and + * the packet is requeued. + **********************************************************************/ +static void +ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp) +{ + struct mbuf *m_head; + struct adapter *adapter = txr->adapter; + + IXV_TX_LOCK_ASSERT(txr); + + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING) + return; + if (!adapter->link_active) + return; + + while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { + + IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); + if (m_head == NULL) + break; + + if (ixv_xmit(txr, &m_head)) { + if (m_head == NULL) + break; + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + IFQ_DRV_PREPEND(&ifp->if_snd, m_head); + break; + } + /* Send a copy of the frame to the BPF listener */ + ETHER_BPF_MTAP(ifp, m_head); + + /* Set watchdog on */ + txr->watchdog_check = TRUE; + txr->watchdog_time = ticks; + + } + return; +} + +/* + * Legacy TX start - called by the stack, this + * always uses the first tx ring, and should + * not be used with multiqueue tx enabled. 
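+ * (On __FreeBSD_version >= 800000 the if_transmit path below,
+ * ixv_mq_start/ixv_mq_start_locked, is used instead; it selects
+ * a ring from the mbuf flowid when M_FLOWID is set.)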
+ */ +static void +ixv_start(struct ifnet *ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct tx_ring *txr = adapter->tx_rings; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXV_TX_LOCK(txr); + ixv_start_locked(txr, ifp); + IXV_TX_UNLOCK(txr); + } + return; +} + +#else + +/* +** Multiqueue Transmit driver +** +*/ +static int +ixv_mq_start(struct ifnet *ifp, struct mbuf *m) +{ + struct adapter *adapter = ifp->if_softc; + struct ix_queue *que; + struct tx_ring *txr; + int i = 0, err = 0; + + /* Which queue to use */ + if ((m->m_flags & M_FLOWID) != 0) + i = m->m_pkthdr.flowid % adapter->num_queues; + + txr = &adapter->tx_rings[i]; + que = &adapter->queues[i]; + + if (IXV_TX_TRYLOCK(txr)) { + err = ixv_mq_start_locked(ifp, txr, m); + IXV_TX_UNLOCK(txr); + } else { + err = drbr_enqueue(ifp, txr->br, m); + taskqueue_enqueue(que->tq, &que->que_task); + } + + return (err); +} + +static int +ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m) +{ + struct adapter *adapter = txr->adapter; + struct mbuf *next; + int enqueued, err = 0; + + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != + IFF_DRV_RUNNING || adapter->link_active == 0) { + if (m != NULL) + err = drbr_enqueue(ifp, txr->br, m); + return (err); + } + + /* Do a clean if descriptors are low */ + if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD) + ixv_txeof(txr); + + enqueued = 0; + if (m == NULL) { + next = drbr_dequeue(ifp, txr->br); + } else if (drbr_needs_enqueue(ifp, txr->br)) { + if ((err = drbr_enqueue(ifp, txr->br, m)) != 0) + return (err); + next = drbr_dequeue(ifp, txr->br); + } else + next = m; + + /* Process the queue */ + while (next != NULL) { + if ((err = ixv_xmit(txr, &next)) != 0) { + if (next != NULL) + err = drbr_enqueue(ifp, txr->br, next); + break; + } + enqueued++; + drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags); + /* Send a copy of the frame to the BPF listener */ + ETHER_BPF_MTAP(ifp, next); + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + break; + if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) { + ifp->if_drv_flags |= IFF_DRV_OACTIVE; + break; + } + next = drbr_dequeue(ifp, txr->br); + } + + if (enqueued > 0) { + /* Set watchdog on */ + txr->watchdog_check = TRUE; + txr->watchdog_time = ticks; + } + + return (err); +} + +/* +** Flush all ring buffers +*/ +static void +ixv_qflush(struct ifnet *ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct tx_ring *txr = adapter->tx_rings; + struct mbuf *m; + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + IXV_TX_LOCK(txr); + while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) + m_freem(m); + IXV_TX_UNLOCK(txr); + } + if_qflush(ifp); +} + +#endif + +/********************************************************************* + * Ioctl entry point + * + * ixv_ioctl is called when the user wants to configure the + * interface. 
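+ * Handled directly: SIOCSIFADDR, SIOCSIFMTU, SIOCSIFFLAGS,
+ * SIOCADDMULTI/SIOCDELMULTI, SIOCSIFMEDIA/SIOCGIFMEDIA and
+ * SIOCSIFCAP; everything else falls through to ether_ioctl().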
+ * + * return 0 on success, positive on failure + **********************************************************************/ + +static int +ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data) +{ + struct adapter *adapter = ifp->if_softc; + struct ifreq *ifr = (struct ifreq *) data; +#if defined(INET) || defined(INET6) + struct ifaddr *ifa = (struct ifaddr *)data; +#endif + int error = 0; + bool avoid_reset = FALSE; + + switch (command) { + + case SIOCSIFADDR: +#ifdef INET + if (ifa->ifa_addr->sa_family == AF_INET) + avoid_reset = TRUE; +#endif +#ifdef INET6 + if (ifa->ifa_addr->sa_family == AF_INET6) + avoid_reset = TRUE; +#endif + /* + ** Calling init results in link renegotiation, + ** so we avoid doing it when possible. + */ + if (avoid_reset) { + ifp->if_flags |= IFF_UP; + if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) + ixv_init(adapter); + if (!(ifp->if_flags & IFF_NOARP)) + arp_ifinit(ifp, ifa); + } else + error = ether_ioctl(ifp, command, data); + break; + + case SIOCSIFMTU: + IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); + if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) { + error = EINVAL; + } else { + IXV_CORE_LOCK(adapter); + ifp->if_mtu = ifr->ifr_mtu; + adapter->max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + ixv_init_locked(adapter); + IXV_CORE_UNLOCK(adapter); + } + break; + case SIOCSIFFLAGS: + IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)"); + IXV_CORE_LOCK(adapter); + if (ifp->if_flags & IFF_UP) { + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + ixv_init_locked(adapter); + } else + if (ifp->if_drv_flags & IFF_DRV_RUNNING) + ixv_stop(adapter); + adapter->if_flags = ifp->if_flags; + IXV_CORE_UNLOCK(adapter); + break; + case SIOCADDMULTI: + case SIOCDELMULTI: + IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI"); + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXV_CORE_LOCK(adapter); + ixv_disable_intr(adapter); + ixv_set_multi(adapter); + ixv_enable_intr(adapter); + IXV_CORE_UNLOCK(adapter); + } + break; + case SIOCSIFMEDIA: + case SIOCGIFMEDIA: + IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)"); + error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); + break; + case SIOCSIFCAP: + { + int mask = ifr->ifr_reqcap ^ ifp->if_capenable; + IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)"); + if (mask & IFCAP_HWCSUM) + ifp->if_capenable ^= IFCAP_HWCSUM; + if (mask & IFCAP_TSO4) + ifp->if_capenable ^= IFCAP_TSO4; + if (mask & IFCAP_LRO) + ifp->if_capenable ^= IFCAP_LRO; + if (mask & IFCAP_VLAN_HWTAGGING) + ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + IXV_CORE_LOCK(adapter); + ixv_init_locked(adapter); + IXV_CORE_UNLOCK(adapter); + } + VLAN_CAPABILITIES(ifp); + break; + } + + default: + IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command); + error = ether_ioctl(ifp, command, data); + break; + } + + return (error); +} + +/********************************************************************* + * Init entry point + * + * This routine is used in two ways. It is used by the stack as + * init entry point in network interface structure. It is also used + * by the driver as a hw/sw initialization routine to get to a + * consistent state. 
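+ * ixv_init_locked() must be called with the core mutex held (it
+ * asserts this); ixv_init() is the unlocked wrapper installed as
+ * ifp->if_init for the stack.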
+ * + * return 0 on success, positive on failure + **********************************************************************/ +#define IXGBE_MHADD_MFS_SHIFT 16 + +static void +ixv_init_locked(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + device_t dev = adapter->dev; + struct ixgbe_hw *hw = &adapter->hw; + u32 mhadd, gpie; + + INIT_DEBUGOUT("ixv_init: begin"); + mtx_assert(&adapter->core_mtx, MA_OWNED); + hw->adapter_stopped = FALSE; + ixgbe_stop_adapter(hw); + callout_stop(&adapter->timer); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + /* Get the latest mac address, User can use a LAA */ + bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr, + IXGBE_ETH_LENGTH_OF_ADDRESS); + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1); + hw->addr_ctrl.rar_used_count = 1; + + /* Prepare transmit descriptors and buffers */ + if (ixv_setup_transmit_structures(adapter)) { + device_printf(dev,"Could not setup transmit structures\n"); + ixv_stop(adapter); + return; + } + + ixgbe_reset_hw(hw); + ixv_initialize_transmit_units(adapter); + + /* Setup Multicast table */ + ixv_set_multi(adapter); + + /* + ** Determine the correct mbuf pool + ** for doing jumbo/headersplit + */ + if (ifp->if_mtu > ETHERMTU) + adapter->rx_mbuf_sz = MJUMPAGESIZE; + else + adapter->rx_mbuf_sz = MCLBYTES; + + /* Prepare receive descriptors and buffers */ + if (ixv_setup_receive_structures(adapter)) { + device_printf(dev,"Could not setup receive structures\n"); + ixv_stop(adapter); + return; + } + + /* Configure RX settings */ + ixv_initialize_receive_units(adapter); + + /* Enable Enhanced MSIX mode */ + gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); + gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME; + gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD; + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + + /* Set the various hardware offload abilities */ + ifp->if_hwassist = 0; + if (ifp->if_capenable & IFCAP_TSO4) + ifp->if_hwassist |= CSUM_TSO; + if (ifp->if_capenable & IFCAP_TXCSUM) { + ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); +#if __FreeBSD_version >= 800000 + ifp->if_hwassist |= CSUM_SCTP; +#endif + } + + /* Set MTU size */ + if (ifp->if_mtu > ETHERMTU) { + mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); + mhadd &= ~IXGBE_MHADD_MFS_MASK; + mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } + + /* Set up VLAN offload and filter */ + ixv_setup_vlan_support(adapter); + + callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); + + /* Set up MSI/X routing */ + ixv_configure_ivars(adapter); + + /* Set up auto-mask */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE); + + /* Set moderation on the Link interrupt */ + IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR); + + /* Stats init */ + ixv_init_stats(adapter); + + /* Config/Enable Link */ + ixv_config_link(adapter); + + /* And now turn on interrupts */ + ixv_enable_intr(adapter); + + /* Now inform the stack we're ready */ + ifp->if_drv_flags |= IFF_DRV_RUNNING; + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + + return; +} + +static void +ixv_init(void *arg) +{ + struct adapter *adapter = arg; + + IXV_CORE_LOCK(adapter); + ixv_init_locked(adapter); + IXV_CORE_UNLOCK(adapter); + return; +} + + +/* +** +** MSIX Interrupt Handlers and Tasklets +** +*/ + +static inline void +ixv_enable_queue(struct adapter *adapter, u32 vector) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 queue = 1 << vector; + u32 mask; + + mask = (IXGBE_EIMS_RTX_QUEUE & queue); + 
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); +} + +static inline void +ixv_disable_queue(struct adapter *adapter, u32 vector) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 queue = (u64)(1 << vector); + u32 mask; + + mask = (IXGBE_EIMS_RTX_QUEUE & queue); + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask); +} + +static inline void +ixv_rearm_queues(struct adapter *adapter, u64 queues) +{ + u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask); +} + + +static void +ixv_handle_que(void *context, int pending) +{ + struct ix_queue *que = context; + struct adapter *adapter = que->adapter; + struct tx_ring *txr = que->txr; + struct ifnet *ifp = adapter->ifp; + bool more; + + if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + more = ixv_rxeof(que, adapter->rx_process_limit); + IXV_TX_LOCK(txr); + ixv_txeof(txr); +#if __FreeBSD_version >= 800000 + if (!drbr_empty(ifp, txr->br)) + ixv_mq_start_locked(ifp, txr, NULL); +#else + if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) + ixv_start_locked(txr, ifp); +#endif + IXV_TX_UNLOCK(txr); + if (more) { + taskqueue_enqueue(que->tq, &que->que_task); + return; + } + } + + /* Reenable this interrupt */ + ixv_enable_queue(adapter, que->msix); + return; +} + +/********************************************************************* + * + * MSI Queue Interrupt Service routine + * + **********************************************************************/ +void +ixv_msix_que(void *arg) +{ + struct ix_queue *que = arg; + struct adapter *adapter = que->adapter; + struct tx_ring *txr = que->txr; + struct rx_ring *rxr = que->rxr; + bool more_tx, more_rx; + u32 newitr = 0; + + ixv_disable_queue(adapter, que->msix); + ++que->irqs; + + more_rx = ixv_rxeof(que, adapter->rx_process_limit); + + IXV_TX_LOCK(txr); + more_tx = ixv_txeof(txr); + /* + ** Make certain that if the stack + ** has anything queued the task gets + ** scheduled to handle it. + */ +#if __FreeBSD_version < 800000 + if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd)) +#else + if (!drbr_empty(adapter->ifp, txr->br)) +#endif + more_tx = 1; + IXV_TX_UNLOCK(txr); + + more_rx = ixv_rxeof(que, adapter->rx_process_limit); + + /* Do AIM now? */ + + if (ixv_enable_aim == FALSE) + goto no_calc; + /* + ** Do Adaptive Interrupt Moderation: + ** - Write out last calculated setting + ** - Calculate based on average size over + ** the last interval. 
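+ ** Illustrative figures: a 576 byte average frame gives
+ ** 576 + 24 = 600, which lands in the mid range and is stored
+ ** as 600/3 = 200; a 1500 byte average gives 1524/2 = 762, and
+ ** the raw value is capped at 3000 before the divide.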
+ */ + if (que->eitr_setting) + IXGBE_WRITE_REG(&adapter->hw, + IXGBE_VTEITR(que->msix), + que->eitr_setting); + + que->eitr_setting = 0; + + /* Idle, do nothing */ + if ((txr->bytes == 0) && (rxr->bytes == 0)) + goto no_calc; + + if ((txr->bytes) && (txr->packets)) + newitr = txr->bytes/txr->packets; + if ((rxr->bytes) && (rxr->packets)) + newitr = max(newitr, + (rxr->bytes / rxr->packets)); + newitr += 24; /* account for hardware frame, crc */ + + /* set an upper boundary */ + newitr = min(newitr, 3000); + + /* Be nice to the mid range */ + if ((newitr > 300) && (newitr < 1200)) + newitr = (newitr / 3); + else + newitr = (newitr / 2); + + newitr |= newitr << 16; + + /* save for next interrupt */ + que->eitr_setting = newitr; + + /* Reset state */ + txr->bytes = 0; + txr->packets = 0; + rxr->bytes = 0; + rxr->packets = 0; + +no_calc: + if (more_tx || more_rx) + taskqueue_enqueue(que->tq, &que->que_task); + else /* Reenable this interrupt */ + ixv_enable_queue(adapter, que->msix); + return; +} + +static void +ixv_msix_mbx(void *arg) +{ + struct adapter *adapter = arg; + struct ixgbe_hw *hw = &adapter->hw; + u32 reg; + + ++adapter->mbx_irq; + + /* First get the cause */ + reg = IXGBE_READ_REG(hw, IXGBE_VTEICS); + /* Clear interrupt with write */ + IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg); + + /* Link status change */ + if (reg & IXGBE_EICR_LSC) + taskqueue_enqueue(adapter->tq, &adapter->mbx_task); + + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER); + return; +} + +/********************************************************************* + * + * Media Ioctl callback + * + * This routine is called whenever the user queries the status of + * the interface using ifconfig. + * + **********************************************************************/ +static void +ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) +{ + struct adapter *adapter = ifp->if_softc; + + INIT_DEBUGOUT("ixv_media_status: begin"); + IXV_CORE_LOCK(adapter); + ixv_update_link_status(adapter); + + ifmr->ifm_status = IFM_AVALID; + ifmr->ifm_active = IFM_ETHER; + + if (!adapter->link_active) { + IXV_CORE_UNLOCK(adapter); + return; + } + + ifmr->ifm_status |= IFM_ACTIVE; + + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_1GB_FULL: + ifmr->ifm_active |= IFM_1000_T | IFM_FDX; + break; + case IXGBE_LINK_SPEED_10GB_FULL: + ifmr->ifm_active |= IFM_FDX; + break; + } + + IXV_CORE_UNLOCK(adapter); + + return; +} + +/********************************************************************* + * + * Media Ioctl callback + * + * This routine is called when the user changes speed/duplex using + * media/mediopt option with ifconfig. + * + **********************************************************************/ +static int +ixv_media_change(struct ifnet * ifp) +{ + struct adapter *adapter = ifp->if_softc; + struct ifmedia *ifm = &adapter->media; + + INIT_DEBUGOUT("ixv_media_change: begin"); + + if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) + return (EINVAL); + + switch (IFM_SUBTYPE(ifm->ifm_media)) { + case IFM_AUTO: + break; + default: + device_printf(adapter->dev, "Only auto media type\n"); + return (EINVAL); + } + + return (0); +} + +/********************************************************************* + * + * This routine maps the mbufs to tx descriptors, allowing the + * TX engine to transmit the packets. 
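+ * The frame is loaded with bus_dmamap_load_mbuf_sg() (with one
+ * m_defrag() retry on EFBIG), an optional context descriptor is
+ * emitted for VLAN/checksum/TSO, then one data descriptor is
+ * filled per DMA segment, EOP|RS is set on the last one and its
+ * index is recorded in the first buffer's eop_index.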
+ * - return 0 on success, positive on failure + * + **********************************************************************/ + +static int +ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp) +{ + struct adapter *adapter = txr->adapter; + u32 olinfo_status = 0, cmd_type_len; + u32 paylen = 0; + int i, j, error, nsegs; + int first, last = 0; + struct mbuf *m_head; + bus_dma_segment_t segs[32]; + bus_dmamap_t map; + struct ixv_tx_buf *txbuf, *txbuf_mapped; + union ixgbe_adv_tx_desc *txd = NULL; + + m_head = *m_headp; + + /* Basic descriptor defines */ + cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT); + + if (m_head->m_flags & M_VLANTAG) + cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; + + /* + * Important to capture the first descriptor + * used because it will contain the index of + * the one we tell the hardware to report back + */ + first = txr->next_avail_desc; + txbuf = &txr->tx_buffers[first]; + txbuf_mapped = txbuf; + map = txbuf->map; + + /* + * Map the packet for DMA. + */ + error = bus_dmamap_load_mbuf_sg(txr->txtag, map, + *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); + + if (error == EFBIG) { + struct mbuf *m; + + m = m_defrag(*m_headp, M_DONTWAIT); + if (m == NULL) { + adapter->mbuf_defrag_failed++; + m_freem(*m_headp); + *m_headp = NULL; + return (ENOBUFS); + } + *m_headp = m; + + /* Try it again */ + error = bus_dmamap_load_mbuf_sg(txr->txtag, map, + *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); + + if (error == ENOMEM) { + adapter->no_tx_dma_setup++; + return (error); + } else if (error != 0) { + adapter->no_tx_dma_setup++; + m_freem(*m_headp); + *m_headp = NULL; + return (error); + } + } else if (error == ENOMEM) { + adapter->no_tx_dma_setup++; + return (error); + } else if (error != 0) { + adapter->no_tx_dma_setup++; + m_freem(*m_headp); + *m_headp = NULL; + return (error); + } + + /* Make certain there are enough descriptors */ + if (nsegs > txr->tx_avail - 2) { + txr->no_desc_avail++; + error = ENOBUFS; + goto xmit_fail; + } + m_head = *m_headp; + + /* + ** Set up the appropriate offload context + ** this becomes the first descriptor of + ** a packet. 
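+ ** (Both ixv_tso_setup() and ixv_tx_ctx_setup() consume one
+ ** descriptor slot themselves, which the "- 2" headroom in the
+ ** tx_avail check above leaves room for.)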
+ */ + if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { + if (ixv_tso_setup(txr, m_head, &paylen)) { + cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; + olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8; + olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; + olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT; + ++adapter->tso_tx; + } else + return (ENXIO); + } else if (ixv_tx_ctx_setup(txr, m_head)) + olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8; + + /* Record payload length */ + if (paylen == 0) + olinfo_status |= m_head->m_pkthdr.len << + IXGBE_ADVTXD_PAYLEN_SHIFT; + + i = txr->next_avail_desc; + for (j = 0; j < nsegs; j++) { + bus_size_t seglen; + bus_addr_t segaddr; + + txbuf = &txr->tx_buffers[i]; + txd = &txr->tx_base[i]; + seglen = segs[j].ds_len; + segaddr = htole64(segs[j].ds_addr); + + txd->read.buffer_addr = segaddr; + txd->read.cmd_type_len = htole32(txr->txd_cmd | + cmd_type_len |seglen); + txd->read.olinfo_status = htole32(olinfo_status); + last = i; /* descriptor that will get completion IRQ */ + + if (++i == adapter->num_tx_desc) + i = 0; + + txbuf->m_head = NULL; + txbuf->eop_index = -1; + } + + txd->read.cmd_type_len |= + htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS); + txr->tx_avail -= nsegs; + txr->next_avail_desc = i; + + txbuf->m_head = m_head; + txbuf->map = map; + bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); + + /* Set the index of the descriptor that will be marked done */ + txbuf = &txr->tx_buffers[first]; + txbuf->eop_index = last; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + /* + * Advance the Transmit Descriptor Tail (Tdt), this tells the + * hardware that this frame is available to transmit. + */ + ++txr->total_packets; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i); + + return (0); + +xmit_fail: + bus_dmamap_unload(txr->txtag, txbuf->map); + return (error); + +} + + +/********************************************************************* + * Multicast Update + * + * This routine is called whenever multicast address list is updated. + * + **********************************************************************/ +#define IXGBE_RAR_ENTRIES 16 + +static void +ixv_set_multi(struct adapter *adapter) +{ + u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS]; + u8 *update_ptr; + struct ifmultiaddr *ifma; + int mcnt = 0; + struct ifnet *ifp = adapter->ifp; + + IOCTL_DEBUGOUT("ixv_set_multi: begin"); + +#if __FreeBSD_version < 800000 + IF_ADDR_LOCK(ifp); +#else + if_maddr_rlock(ifp); +#endif + TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { + if (ifma->ifma_addr->sa_family != AF_LINK) + continue; + bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), + &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS], + IXGBE_ETH_LENGTH_OF_ADDRESS); + mcnt++; + } +#if __FreeBSD_version < 800000 + IF_ADDR_UNLOCK(ifp); +#else + if_maddr_runlock(ifp); +#endif + + update_ptr = mta; + + ixgbe_update_mc_addr_list(&adapter->hw, + update_ptr, mcnt, ixv_mc_array_itr, TRUE); + + return; +} + +/* + * This is an iterator function now needed by the multicast + * shared code. It simply feeds the shared code routine the + * addresses in the array of ixv_set_multi() one by one. 
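+ * The VMDq pool argument is not meaningful for a VF, so it is
+ * simply zeroed on every call.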
+ */ +static u8 * +ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq) +{ + u8 *addr = *update_ptr; + u8 *newptr; + *vmdq = 0; + + newptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS; + *update_ptr = newptr; + return addr; +} + +/********************************************************************* + * Timer routine + * + * This routine checks for link status,updates statistics, + * and runs the watchdog check. + * + **********************************************************************/ + +static void +ixv_local_timer(void *arg) +{ + struct adapter *adapter = arg; + device_t dev = adapter->dev; + struct tx_ring *txr = adapter->tx_rings; + int i; + + mtx_assert(&adapter->core_mtx, MA_OWNED); + + ixv_update_link_status(adapter); + + /* Stats Update */ + ixv_update_stats(adapter); + + /* + * If the interface has been paused + * then don't do the watchdog check + */ + if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF) + goto out; + /* + ** Check for time since any descriptor was cleaned + */ + for (i = 0; i < adapter->num_queues; i++, txr++) { + IXV_TX_LOCK(txr); + if (txr->watchdog_check == FALSE) { + IXV_TX_UNLOCK(txr); + continue; + } + if ((ticks - txr->watchdog_time) > IXV_WATCHDOG) + goto hung; + IXV_TX_UNLOCK(txr); + } +out: + ixv_rearm_queues(adapter, adapter->que_mask); + callout_reset(&adapter->timer, hz, ixv_local_timer, adapter); + return; + +hung: + device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); + device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, + IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)), + IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i))); + device_printf(dev,"TX(%d) desc avail = %d," + "Next TX to Clean = %d\n", + txr->me, txr->tx_avail, txr->next_to_clean); + adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + adapter->watchdog_events++; + IXV_TX_UNLOCK(txr); + ixv_init_locked(adapter); +} + +/* +** Note: this routine updates the OS on the link state +** the real check of the hardware only happens with +** a link interrupt. +*/ +static void +ixv_update_link_status(struct adapter *adapter) +{ + struct ifnet *ifp = adapter->ifp; + struct tx_ring *txr = adapter->tx_rings; + device_t dev = adapter->dev; + + + if (adapter->link_up){ + if (adapter->link_active == FALSE) { + if (bootverbose) + device_printf(dev,"Link is up %d Gbps %s \n", + ((adapter->link_speed == 128)? 10:1), + "Full Duplex"); + adapter->link_active = TRUE; + if_link_state_change(ifp, LINK_STATE_UP); + } + } else { /* Link down */ + if (adapter->link_active == TRUE) { + if (bootverbose) + device_printf(dev,"Link is Down\n"); + if_link_state_change(ifp, LINK_STATE_DOWN); + adapter->link_active = FALSE; + for (int i = 0; i < adapter->num_queues; + i++, txr++) + txr->watchdog_check = FALSE; + } + } + + return; +} + + +/********************************************************************* + * + * This routine disables all traffic on the adapter by issuing a + * global reset on the MAC and deallocates TX/RX buffers. 
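+ * (The ring memory itself is not released here; that happens
+ * later, in ixv_detach(), via ixv_free_transmit_structures() and
+ * ixv_free_receive_structures().)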
+ * + **********************************************************************/ + +static void +ixv_stop(void *arg) +{ + struct ifnet *ifp; + struct adapter *adapter = arg; + struct ixgbe_hw *hw = &adapter->hw; + ifp = adapter->ifp; + + mtx_assert(&adapter->core_mtx, MA_OWNED); + + INIT_DEBUGOUT("ixv_stop: begin\n"); + ixv_disable_intr(adapter); + + /* Tell the stack that the interface is no longer active */ + ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); + + ixgbe_reset_hw(hw); + adapter->hw.adapter_stopped = FALSE; + ixgbe_stop_adapter(hw); + callout_stop(&adapter->timer); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); + + return; +} + + +/********************************************************************* + * + * Determine hardware revision. + * + **********************************************************************/ +static void +ixv_identify_hardware(struct adapter *adapter) +{ + device_t dev = adapter->dev; + u16 pci_cmd_word; + + /* + ** Make sure BUSMASTER is set, on a VM under + ** KVM it may not be and will break things. + */ + pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); + if (!((pci_cmd_word & PCIM_CMD_BUSMASTEREN) && + (pci_cmd_word & PCIM_CMD_MEMEN))) { + INIT_DEBUGOUT("Memory Access and/or Bus Master " + "bits were not set!\n"); + pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); + pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2); + } + + /* Save off the information about this board */ + adapter->hw.vendor_id = pci_get_vendor(dev); + adapter->hw.device_id = pci_get_device(dev); + adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); + adapter->hw.subsystem_vendor_id = + pci_read_config(dev, PCIR_SUBVEND_0, 2); + adapter->hw.subsystem_device_id = + pci_read_config(dev, PCIR_SUBDEV_0, 2); + + return; +} + +/********************************************************************* + * + * Setup MSIX Interrupt resources and handlers + * + **********************************************************************/ +static int +ixv_allocate_msix(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ix_queue *que = adapter->queues; + int error, rid, vector = 0; + + for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { + rid = vector + 1; + que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, + RF_SHAREABLE | RF_ACTIVE); + if (que->res == NULL) { + device_printf(dev,"Unable to allocate" + " bus resource: que interrupt [%d]\n", vector); + return (ENXIO); + } + /* Set the handler function */ + error = bus_setup_intr(dev, que->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + ixv_msix_que, que, &que->tag); + if (error) { + que->res = NULL; + device_printf(dev, "Failed to register QUE handler"); + return (error); + } +#if __FreeBSD_version >= 800504 + bus_describe_intr(dev, que->res, que->tag, "que %d", i); +#endif + que->msix = vector; + adapter->que_mask |= (u64)(1 << que->msix); + /* + ** Bind the msix vector, and thus the + ** ring to the corresponding cpu. 
+ */ + if (adapter->num_queues > 1) + bus_bind_intr(dev, que->res, i); + + TASK_INIT(&que->que_task, 0, ixv_handle_que, que); + que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT, + taskqueue_thread_enqueue, &que->tq); + taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", + device_get_nameunit(adapter->dev)); + } + + /* and Mailbox */ + rid = vector + 1; + adapter->res = bus_alloc_resource_any(dev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); + if (!adapter->res) { + device_printf(dev,"Unable to allocate" + " bus resource: MBX interrupt [%d]\n", rid); + return (ENXIO); + } + /* Set the mbx handler function */ + error = bus_setup_intr(dev, adapter->res, + INTR_TYPE_NET | INTR_MPSAFE, NULL, + ixv_msix_mbx, adapter, &adapter->tag); + if (error) { + adapter->res = NULL; + device_printf(dev, "Failed to register LINK handler"); + return (error); + } +#if __FreeBSD_version >= 800504 + bus_describe_intr(dev, adapter->res, adapter->tag, "mbx"); +#endif + adapter->mbxvec = vector; + /* Tasklets for Mailbox */ + TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter); + adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT, + taskqueue_thread_enqueue, &adapter->tq); + taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq", + device_get_nameunit(adapter->dev)); + /* + ** XXX - remove this when KVM/QEMU fix gets in... + ** Due to a broken design QEMU will fail to properly + ** enable the guest for MSIX unless the vectors in + ** the table are all set up, so we must rewrite the + ** ENABLE in the MSIX control register again at this + ** point to cause it to successfully initialize us. + */ + if (adapter->hw.mac.type == ixgbe_mac_82599_vf) { + int msix_ctrl; + pci_find_extcap(dev, PCIY_MSIX, &rid); + rid += PCIR_MSIX_CTRL; + msix_ctrl = pci_read_config(dev, rid, 2); + msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; + pci_write_config(dev, rid, msix_ctrl, 2); + } + + return (0); +} + +/* + * Setup MSIX resources, note that the VF + * device MUST use MSIX, there is no fallback. + */ +static int +ixv_setup_msix(struct adapter *adapter) +{ + device_t dev = adapter->dev; + int rid, vectors, want = 2; + + + /* First try MSI/X */ + rid = PCIR_BAR(3); + adapter->msix_mem = bus_alloc_resource_any(dev, + SYS_RES_MEMORY, &rid, RF_ACTIVE); + if (!adapter->msix_mem) { + device_printf(adapter->dev, + "Unable to map MSIX table \n"); + goto out; + } + + vectors = pci_msix_count(dev); + if (vectors < 2) { + bus_release_resource(dev, SYS_RES_MEMORY, + rid, adapter->msix_mem); + adapter->msix_mem = NULL; + goto out; + } + + /* + ** Want two vectors: one for a queue, + ** plus an additional for mailbox. 
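+ ** There is no INTx/MSI fallback for the VF, so if the two
+ ** vectors cannot be allocated this returns ENXIO and attach
+ ** aborts.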
+ */ + if (pci_alloc_msix(dev, &want) == 0) { + device_printf(adapter->dev, + "Using MSIX interrupts with %d vectors\n", want); + return (want); + } +out: + device_printf(adapter->dev,"MSIX config error\n"); + return (ENXIO); +} + + +static int +ixv_allocate_pci_resources(struct adapter *adapter) +{ + int rid; + device_t dev = adapter->dev; + + rid = PCIR_BAR(0); + adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); + + if (!(adapter->pci_mem)) { + device_printf(dev,"Unable to allocate bus resource: memory\n"); + return (ENXIO); + } + + adapter->osdep.mem_bus_space_tag = + rman_get_bustag(adapter->pci_mem); + adapter->osdep.mem_bus_space_handle = + rman_get_bushandle(adapter->pci_mem); + adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle; + + adapter->num_queues = 1; + adapter->hw.back = &adapter->osdep; + + /* + ** Now setup MSI/X, should + ** return us the number of + ** configured vectors. + */ + adapter->msix = ixv_setup_msix(adapter); + if (adapter->msix == ENXIO) + return (ENXIO); + else + return (0); +} + +static void +ixv_free_pci_resources(struct adapter * adapter) +{ + struct ix_queue *que = adapter->queues; + device_t dev = adapter->dev; + int rid, memrid; + + memrid = PCIR_BAR(MSIX_BAR); + + /* + ** There is a slight possibility of a failure mode + ** in attach that will result in entering this function + ** before interrupt resources have been initialized, and + ** in that case we do not want to execute the loops below + ** We can detect this reliably by the state of the adapter + ** res pointer. + */ + if (adapter->res == NULL) + goto mem; + + /* + ** Release all msix queue resources: + */ + for (int i = 0; i < adapter->num_queues; i++, que++) { + rid = que->msix + 1; + if (que->tag != NULL) { + bus_teardown_intr(dev, que->res, que->tag); + que->tag = NULL; + } + if (que->res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, rid, que->res); + } + + + /* Clean the Legacy or Link interrupt last */ + if (adapter->mbxvec) /* we are doing MSIX */ + rid = adapter->mbxvec + 1; + else + (adapter->msix != 0) ? (rid = 1):(rid = 0); + + if (adapter->tag != NULL) { + bus_teardown_intr(dev, adapter->res, adapter->tag); + adapter->tag = NULL; + } + if (adapter->res != NULL) + bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); + +mem: + if (adapter->msix) + pci_release_msi(dev); + + if (adapter->msix_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + memrid, adapter->msix_mem); + + if (adapter->pci_mem != NULL) + bus_release_resource(dev, SYS_RES_MEMORY, + PCIR_BAR(0), adapter->pci_mem); + + return; +} + +/********************************************************************* + * + * Setup networking device structure and register an interface. 
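+ * The ifnet is created with HWCSUM, TSO4, LRO, VLAN tagging and
+ * jumbo MTU capabilities enabled by default; only the auto-select
+ * media type is accepted by ixv_media_change(), since the VF has
+ * no control over the physical link.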
+ * + **********************************************************************/ +static void +ixv_setup_interface(device_t dev, struct adapter *adapter) +{ + struct ifnet *ifp; + + INIT_DEBUGOUT("ixv_setup_interface: begin"); + + ifp = adapter->ifp = if_alloc(IFT_ETHER); + if (ifp == NULL) + panic("%s: can not if_alloc()\n", device_get_nameunit(dev)); + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); + ifp->if_mtu = ETHERMTU; + ifp->if_baudrate = 1000000000; + ifp->if_init = ixv_init; + ifp->if_softc = adapter; + ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; + ifp->if_ioctl = ixv_ioctl; +#if __FreeBSD_version >= 800000 + ifp->if_transmit = ixv_mq_start; + ifp->if_qflush = ixv_qflush; +#else + ifp->if_start = ixv_start; +#endif + ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2; + + ether_ifattach(ifp, adapter->hw.mac.addr); + + adapter->max_frame_size = + ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + /* + * Tell the upper layer(s) we support long frames. + */ + ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); + + ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM; + ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; + ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO; + + ifp->if_capenable = ifp->if_capabilities; + + /* + * Specify the media types supported by this adapter and register + * callbacks to update media and link information + */ + ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change, + ixv_media_status); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL); + ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); + ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); + + return; +} + +static void +ixv_config_link(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 autoneg, err = 0; + bool negotiate = TRUE; + + if (hw->mac.ops.check_link) + err = hw->mac.ops.check_link(hw, &autoneg, + &adapter->link_up, FALSE); + if (err) + goto out; + + if (hw->mac.ops.setup_link) + err = hw->mac.ops.setup_link(hw, autoneg, + negotiate, adapter->link_up); +out: + return; +} + +/******************************************************************** + * Manage DMA'able memory. 
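+ * ixv_dma_malloc() creates a tag, allocates one contiguous
+ * DMA-safe buffer and records its bus address through the
+ * ixv_dmamap_cb() load callback; ixv_dma_free() unwinds the same
+ * steps in reverse.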
+ *******************************************************************/ +static void +ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) +{ + if (error) + return; + *(bus_addr_t *) arg = segs->ds_addr; + return; +} + +static int +ixv_dma_malloc(struct adapter *adapter, bus_size_t size, + struct ixv_dma_alloc *dma, int mapflags) +{ + device_t dev = adapter->dev; + int r; + + r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ + DBA_ALIGN, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + size, /* maxsize */ + 1, /* nsegments */ + size, /* maxsegsize */ + BUS_DMA_ALLOCNOW, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &dma->dma_tag); + if (r != 0) { + device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; " + "error %u\n", r); + goto fail_0; + } + r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, + BUS_DMA_NOWAIT, &dma->dma_map); + if (r != 0) { + device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; " + "error %u\n", r); + goto fail_1; + } + r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, + size, + ixv_dmamap_cb, + &dma->dma_paddr, + mapflags | BUS_DMA_NOWAIT); + if (r != 0) { + device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; " + "error %u\n", r); + goto fail_2; + } + dma->dma_size = size; + return (0); +fail_2: + bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); +fail_1: + bus_dma_tag_destroy(dma->dma_tag); +fail_0: + dma->dma_map = NULL; + dma->dma_tag = NULL; + return (r); +} + +static void +ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma) +{ + bus_dmamap_sync(dma->dma_tag, dma->dma_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(dma->dma_tag, dma->dma_map); + bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); + bus_dma_tag_destroy(dma->dma_tag); +} + + +/********************************************************************* + * + * Allocate memory for the transmit and receive rings, and then + * the descriptors associated with each, called only once at attach. 
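+ * Ring sizes are rounded up to DBA_ALIGN; for instance, 1024
+ * descriptors of 16 bytes each (an illustrative count -- the real
+ * ones come from the hw.ixv.txd/rxd tunables) make a 16KB ring.
+ * The txconf/rxconf counters let a mid-course failure unwind only
+ * what was actually allocated.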
+ * + **********************************************************************/ +static int +ixv_allocate_queues(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ix_queue *que; + struct tx_ring *txr; + struct rx_ring *rxr; + int rsize, tsize, error = 0; + int txconf = 0, rxconf = 0; + + /* First allocate the top level queue structs */ + if (!(adapter->queues = + (struct ix_queue *) malloc(sizeof(struct ix_queue) * + adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate queue memory\n"); + error = ENOMEM; + goto fail; + } + + /* First allocate the TX ring struct memory */ + if (!(adapter->tx_rings = + (struct tx_ring *) malloc(sizeof(struct tx_ring) * + adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate TX ring memory\n"); + error = ENOMEM; + goto tx_fail; + } + + /* Next allocate the RX */ + if (!(adapter->rx_rings = + (struct rx_ring *) malloc(sizeof(struct rx_ring) * + adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate RX ring memory\n"); + error = ENOMEM; + goto rx_fail; + } + + /* For the ring itself */ + tsize = roundup2(adapter->num_tx_desc * + sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN); + + /* + * Now set up the TX queues, txconf is needed to handle the + * possibility that things fail midcourse and we need to + * undo memory gracefully + */ + for (int i = 0; i < adapter->num_queues; i++, txconf++) { + /* Set up some basics */ + txr = &adapter->tx_rings[i]; + txr->adapter = adapter; + txr->me = i; + + /* Initialize the TX side lock */ + snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", + device_get_nameunit(dev), txr->me); + mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); + + if (ixv_dma_malloc(adapter, tsize, + &txr->txdma, BUS_DMA_NOWAIT)) { + device_printf(dev, + "Unable to allocate TX Descriptor memory\n"); + error = ENOMEM; + goto err_tx_desc; + } + txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr; + bzero((void *)txr->tx_base, tsize); + + /* Now allocate transmit buffers for the ring */ + if (ixv_allocate_transmit_buffers(txr)) { + device_printf(dev, + "Critical Failure setting up transmit buffers\n"); + error = ENOMEM; + goto err_tx_desc; + } +#if __FreeBSD_version >= 800000 + /* Allocate a buf ring */ + txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF, + M_WAITOK, &txr->tx_mtx); + if (txr->br == NULL) { + device_printf(dev, + "Critical Failure setting up buf ring\n"); + error = ENOMEM; + goto err_tx_desc; + } +#endif + } + + /* + * Next the RX queues... 
+ */ + rsize = roundup2(adapter->num_rx_desc * + sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); + for (int i = 0; i < adapter->num_queues; i++, rxconf++) { + rxr = &adapter->rx_rings[i]; + /* Set up some basics */ + rxr->adapter = adapter; + rxr->me = i; + + /* Initialize the RX side lock */ + snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", + device_get_nameunit(dev), rxr->me); + mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); + + if (ixv_dma_malloc(adapter, rsize, + &rxr->rxdma, BUS_DMA_NOWAIT)) { + device_printf(dev, + "Unable to allocate RxDescriptor memory\n"); + error = ENOMEM; + goto err_rx_desc; + } + rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr; + bzero((void *)rxr->rx_base, rsize); + + /* Allocate receive buffers for the ring*/ + if (ixv_allocate_receive_buffers(rxr)) { + device_printf(dev, + "Critical Failure setting up receive buffers\n"); + error = ENOMEM; + goto err_rx_desc; + } + } + + /* + ** Finally set up the queue holding structs + */ + for (int i = 0; i < adapter->num_queues; i++) { + que = &adapter->queues[i]; + que->adapter = adapter; + que->txr = &adapter->tx_rings[i]; + que->rxr = &adapter->rx_rings[i]; + } + + return (0); + +err_rx_desc: + for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) + ixv_dma_free(adapter, &rxr->rxdma); +err_tx_desc: + for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) + ixv_dma_free(adapter, &txr->txdma); + free(adapter->rx_rings, M_DEVBUF); +rx_fail: + free(adapter->tx_rings, M_DEVBUF); +tx_fail: + free(adapter->queues, M_DEVBUF); +fail: + return (error); +} + + +/********************************************************************* + * + * Allocate memory for tx_buffer structures. The tx_buffer stores all + * the information needed to transmit a packet on the wire. This is + * called only once at attach, setup is done every reset. + * + **********************************************************************/ +static int +ixv_allocate_transmit_buffers(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + device_t dev = adapter->dev; + struct ixv_tx_buf *txbuf; + int error, i; + + /* + * Setup DMA descriptor areas. + */ + if ((error = bus_dma_tag_create(NULL, /* parent */ + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + IXV_TSO_SIZE, /* maxsize */ + 32, /* nsegments */ + PAGE_SIZE, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &txr->txtag))) { + device_printf(dev,"Unable to allocate TX DMA tag\n"); + goto fail; + } + + if (!(txr->tx_buffers = + (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) * + adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate tx_buffer memory\n"); + error = ENOMEM; + goto fail; + } + + /* Create the descriptor buffer dma maps */ + txbuf = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { + error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); + if (error != 0) { + device_printf(dev, "Unable to create TX DMA map\n"); + goto fail; + } + } + + return 0; +fail: + /* We free all, it handles case where we are in the middle */ + ixv_free_transmit_structures(adapter); + return (error); +} + +/********************************************************************* + * + * Initialize a transmit ring. 
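+ * Called for every ring from ixv_setup_transmit_structures() on
+ * each (re)init, so it frees any stale mbufs before resetting the
+ * ring indices and tx_avail.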
+ * + **********************************************************************/ +static void +ixv_setup_transmit_ring(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct ixv_tx_buf *txbuf; + int i; + + /* Clear the old ring contents */ + IXV_TX_LOCK(txr); + bzero((void *)txr->tx_base, + (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc); + /* Reset indices */ + txr->next_avail_desc = 0; + txr->next_to_clean = 0; + + /* Free any existing tx buffers. */ + txbuf = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { + if (txbuf->m_head != NULL) { + bus_dmamap_sync(txr->txtag, txbuf->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, txbuf->map); + m_freem(txbuf->m_head); + txbuf->m_head = NULL; + } + /* Clear the EOP index */ + txbuf->eop_index = -1; + } + + /* Set number of descriptors available */ + txr->tx_avail = adapter->num_tx_desc; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + IXV_TX_UNLOCK(txr); +} + +/********************************************************************* + * + * Initialize all transmit rings. + * + **********************************************************************/ +static int +ixv_setup_transmit_structures(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + + for (int i = 0; i < adapter->num_queues; i++, txr++) + ixv_setup_transmit_ring(txr); + + return (0); +} + +/********************************************************************* + * + * Enable transmit unit. + * + **********************************************************************/ +static void +ixv_initialize_transmit_units(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + struct ixgbe_hw *hw = &adapter->hw; + + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + u64 tdba = txr->txdma.dma_paddr; + u32 txctrl, txdctl; + + /* Set WTHRESH to 8, burst writeback */ + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= (8 << 16); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + /* Now enable */ + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + + /* Set the HW Tx Head and Tail indices */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDH(i), 0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(i), 0); + + /* Setup Transmit Descriptor Cmd Settings */ + txr->txd_cmd = IXGBE_TXD_CMD_IFCS; + txr->watchdog_check = FALSE; + + /* Set Ring parameters */ + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i), + (tdba & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i), + adapter->num_tx_desc * + sizeof(struct ixgbe_legacy_tx_desc)); + txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i)); + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl); + break; + } + + return; +} + +/********************************************************************* + * + * Free all transmit rings. 
+ * + **********************************************************************/ +static void +ixv_free_transmit_structures(struct adapter *adapter) +{ + struct tx_ring *txr = adapter->tx_rings; + + for (int i = 0; i < adapter->num_queues; i++, txr++) { + IXV_TX_LOCK(txr); + ixv_free_transmit_buffers(txr); + ixv_dma_free(adapter, &txr->txdma); + IXV_TX_UNLOCK(txr); + IXV_TX_LOCK_DESTROY(txr); + } + free(adapter->tx_rings, M_DEVBUF); +} + +/********************************************************************* + * + * Free transmit ring related data structures. + * + **********************************************************************/ +static void +ixv_free_transmit_buffers(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct ixv_tx_buf *tx_buffer; + int i; + + INIT_DEBUGOUT("free_transmit_ring: begin"); + + if (txr->tx_buffers == NULL) + return; + + tx_buffer = txr->tx_buffers; + for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { + if (tx_buffer->m_head != NULL) { + bus_dmamap_sync(txr->txtag, tx_buffer->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + m_freem(tx_buffer->m_head); + tx_buffer->m_head = NULL; + if (tx_buffer->map != NULL) { + bus_dmamap_destroy(txr->txtag, + tx_buffer->map); + tx_buffer->map = NULL; + } + } else if (tx_buffer->map != NULL) { + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + bus_dmamap_destroy(txr->txtag, + tx_buffer->map); + tx_buffer->map = NULL; + } + } +#if __FreeBSD_version >= 800000 + if (txr->br != NULL) + buf_ring_free(txr->br, M_DEVBUF); +#endif + if (txr->tx_buffers != NULL) { + free(txr->tx_buffers, M_DEVBUF); + txr->tx_buffers = NULL; + } + if (txr->txtag != NULL) { + bus_dma_tag_destroy(txr->txtag); + txr->txtag = NULL; + } + return; +} + +/********************************************************************* + * + * Advanced Context Descriptor setup for VLAN or CSUM + * + **********************************************************************/ + +static boolean_t +ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp) +{ + struct adapter *adapter = txr->adapter; + struct ixgbe_adv_tx_context_desc *TXD; + struct ixv_tx_buf *tx_buffer; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + struct ether_vlan_header *eh; + struct ip *ip; + struct ip6_hdr *ip6; + int ehdrlen, ip_hlen = 0; + u16 etype; + u8 ipproto = 0; + bool offload = TRUE; + int ctxd = txr->next_avail_desc; + u16 vtag = 0; + + + if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0) + offload = FALSE; + + + tx_buffer = &txr->tx_buffers[ctxd]; + TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; + + /* + ** In advanced descriptors the vlan tag must + ** be placed into the descriptor itself. + */ + if (mp->m_flags & M_VLANTAG) { + vtag = htole16(mp->m_pkthdr.ether_vtag); + vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); + } else if (offload == FALSE) + return FALSE; + + /* + * Determine where frame payload starts. + * Jump over vlan headers if already present, + * helpful for QinQ too. 
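+ * (ehdrlen works out to 14 bytes for a plain Ethernet header, or
+ * 18 bytes when an 802.1Q tag is present in the mbuf data.)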
+ */ + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { + etype = ntohs(eh->evl_proto); + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + } else { + etype = ntohs(eh->evl_encap_proto); + ehdrlen = ETHER_HDR_LEN; + } + + /* Set the ether header length */ + vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; + + switch (etype) { + case ETHERTYPE_IP: + ip = (struct ip *)(mp->m_data + ehdrlen); + ip_hlen = ip->ip_hl << 2; + if (mp->m_len < ehdrlen + ip_hlen) + return (FALSE); + ipproto = ip->ip_p; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + break; + case ETHERTYPE_IPV6: + ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); + ip_hlen = sizeof(struct ip6_hdr); + if (mp->m_len < ehdrlen + ip_hlen) + return (FALSE); + ipproto = ip6->ip6_nxt; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6; + break; + default: + offload = FALSE; + break; + } + + vlan_macip_lens |= ip_hlen; + type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + + switch (ipproto) { + case IPPROTO_TCP: + if (mp->m_pkthdr.csum_flags & CSUM_TCP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + break; + + case IPPROTO_UDP: + if (mp->m_pkthdr.csum_flags & CSUM_UDP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP; + break; + +#if __FreeBSD_version >= 800000 + case IPPROTO_SCTP: + if (mp->m_pkthdr.csum_flags & CSUM_SCTP) + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + break; +#endif + default: + offload = FALSE; + break; + } + + /* Now copy bits into descriptor */ + TXD->vlan_macip_lens |= htole32(vlan_macip_lens); + TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); + TXD->seqnum_seed = htole32(0); + TXD->mss_l4len_idx = htole32(0); + + tx_buffer->m_head = NULL; + tx_buffer->eop_index = -1; + + /* We've consumed the first desc, adjust counters */ + if (++ctxd == adapter->num_tx_desc) + ctxd = 0; + txr->next_avail_desc = ctxd; + --txr->tx_avail; + + return (offload); +} + +/********************************************************************** + * + * Setup work for hardware segmentation offload (TSO) on + * adapters using advanced tx descriptors + * + **********************************************************************/ +static boolean_t +ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen) +{ + struct adapter *adapter = txr->adapter; + struct ixgbe_adv_tx_context_desc *TXD; + struct ixv_tx_buf *tx_buffer; + u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; + u32 mss_l4len_idx = 0; + u16 vtag = 0; + int ctxd, ehdrlen, hdrlen, ip_hlen, tcp_hlen; + struct ether_vlan_header *eh; + struct ip *ip; + struct tcphdr *th; + + + /* + * Determine where frame payload starts. + * Jump over vlan headers if already present + */ + eh = mtod(mp, struct ether_vlan_header *); + if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) + ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; + else + ehdrlen = ETHER_HDR_LEN; + + /* Ensure we have at least the IP+TCP header in the first mbuf. 
*/ + if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr)) + return FALSE; + + ctxd = txr->next_avail_desc; + tx_buffer = &txr->tx_buffers[ctxd]; + TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd]; + + ip = (struct ip *)(mp->m_data + ehdrlen); + if (ip->ip_p != IPPROTO_TCP) + return FALSE; /* 0 */ + ip->ip_sum = 0; + ip_hlen = ip->ip_hl << 2; + th = (struct tcphdr *)((caddr_t)ip + ip_hlen); + th->th_sum = in_pseudo(ip->ip_src.s_addr, + ip->ip_dst.s_addr, htons(IPPROTO_TCP)); + tcp_hlen = th->th_off << 2; + hdrlen = ehdrlen + ip_hlen + tcp_hlen; + + /* This is used in the transmit desc in encap */ + *paylen = mp->m_pkthdr.len - hdrlen; + + /* VLAN MACLEN IPLEN */ + if (mp->m_flags & M_VLANTAG) { + vtag = htole16(mp->m_pkthdr.ether_vtag); + vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT); + } + + vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= ip_hlen; + TXD->vlan_macip_lens |= htole32(vlan_macip_lens); + + /* ADV DTYPE TUCMD */ + type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; + TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl); + + + /* MSS L4LEN IDX */ + mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT); + TXD->mss_l4len_idx = htole32(mss_l4len_idx); + + TXD->seqnum_seed = htole32(0); + tx_buffer->m_head = NULL; + tx_buffer->eop_index = -1; + + if (++ctxd == adapter->num_tx_desc) + ctxd = 0; + + txr->tx_avail--; + txr->next_avail_desc = ctxd; + return TRUE; +} + + +/********************************************************************** + * + * Examine each tx_buffer in the used queue. If the hardware is done + * processing the packet then free associated resources. The + * tx_buffer is put back on the free queue. + * + **********************************************************************/ +static boolean_t +ixv_txeof(struct tx_ring *txr) +{ + struct adapter *adapter = txr->adapter; + struct ifnet *ifp = adapter->ifp; + u32 first, last, done; + struct ixv_tx_buf *tx_buffer; + struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc; + + mtx_assert(&txr->tx_mtx, MA_OWNED); + + if (txr->tx_avail == adapter->num_tx_desc) + return FALSE; + + first = txr->next_to_clean; + tx_buffer = &txr->tx_buffers[first]; + /* For cleanup we just use legacy struct */ + tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first]; + last = tx_buffer->eop_index; + if (last == -1) + return FALSE; + eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last]; + + /* + ** Get the index of the first descriptor + ** BEYOND the EOP and call that 'done'. + ** I do this so the comparison in the + ** inner while loop below can be simple + */ + if (++last == adapter->num_tx_desc) last = 0; + done = last; + + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_POSTREAD); + /* + ** Only the EOP descriptor of a packet now has the DD + ** bit set, this is what we look for... 
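+ ** The outer loop below advances one completed packet at a time,
+ ** while the inner loop reclaims every descriptor up to 'done'.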
+ */ + while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) { + /* We clean the range of the packet */ + while (first != done) { + tx_desc->upper.data = 0; + tx_desc->lower.data = 0; + tx_desc->buffer_addr = 0; + ++txr->tx_avail; + + if (tx_buffer->m_head) { + bus_dmamap_sync(txr->txtag, + tx_buffer->map, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(txr->txtag, + tx_buffer->map); + m_freem(tx_buffer->m_head); + tx_buffer->m_head = NULL; + tx_buffer->map = NULL; + } + tx_buffer->eop_index = -1; + txr->watchdog_time = ticks; + + if (++first == adapter->num_tx_desc) + first = 0; + + tx_buffer = &txr->tx_buffers[first]; + tx_desc = + (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first]; + } + ++ifp->if_opackets; + /* See if there is more work now */ + last = tx_buffer->eop_index; + if (last != -1) { + eop_desc = + (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last]; + /* Get next done point */ + if (++last == adapter->num_tx_desc) last = 0; + done = last; + } else + break; + } + bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + txr->next_to_clean = first; + + /* + * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that + * it is OK to send packets. If there are no pending descriptors, + * clear the timeout. Otherwise, if some descriptors have been freed, + * restart the timeout. + */ + if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) { + ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + if (txr->tx_avail == adapter->num_tx_desc) { + txr->watchdog_check = FALSE; + return FALSE; + } + } + + return TRUE; +} + +/********************************************************************* + * + * Refresh mbuf buffers for RX descriptor rings + * - now keeps its own state so discards due to resource + * exhaustion are unnecessary, if an mbuf cannot be obtained + * it just returns, keeping its placeholder, thus it can simply + * be recalled to try again. 
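+ * The tail register (VFRDT) is only written when at least one
+ * descriptor was actually refreshed.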
+ * + **********************************************************************/ +static void +ixv_refresh_mbufs(struct rx_ring *rxr, int limit) +{ + struct adapter *adapter = rxr->adapter; + bus_dma_segment_t hseg[1]; + bus_dma_segment_t pseg[1]; + struct ixv_rx_buf *rxbuf; + struct mbuf *mh, *mp; + int i, j, nsegs, error; + bool refreshed = FALSE; + + i = j = rxr->next_to_refresh; + /* Get the control variable, one beyond refresh point */ + if (++j == adapter->num_rx_desc) + j = 0; + while (j != limit) { + rxbuf = &rxr->rx_buffers[i]; + if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) { + mh = m_gethdr(M_DONTWAIT, MT_DATA); + if (mh == NULL) + goto update; + mh->m_pkthdr.len = mh->m_len = MHLEN; + mh->m_len = MHLEN; + mh->m_flags |= M_PKTHDR; + m_adj(mh, ETHER_ALIGN); + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->htag, + rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); + if (error != 0) { + printf("GET BUF: dmamap load" + " failure - %d\n", error); + m_free(mh); + goto update; + } + rxbuf->m_head = mh; + bus_dmamap_sync(rxr->htag, rxbuf->hmap, + BUS_DMASYNC_PREREAD); + rxr->rx_base[i].read.hdr_addr = + htole64(hseg[0].ds_addr); + } + + if (rxbuf->m_pack == NULL) { + mp = m_getjcl(M_DONTWAIT, MT_DATA, + M_PKTHDR, adapter->rx_mbuf_sz); + if (mp == NULL) + goto update; + } else + mp = rxbuf->m_pack; + + mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->ptag, + rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); + if (error != 0) { + printf("GET BUF: dmamap load" + " failure - %d\n", error); + m_free(mp); + rxbuf->m_pack = NULL; + goto update; + } + rxbuf->m_pack = mp; + bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + BUS_DMASYNC_PREREAD); + rxr->rx_base[i].read.pkt_addr = + htole64(pseg[0].ds_addr); + + refreshed = TRUE; + rxr->next_to_refresh = i = j; + /* Calculate next index */ + if (++j == adapter->num_rx_desc) + j = 0; + } +update: + if (refreshed) /* update tail index */ + IXGBE_WRITE_REG(&adapter->hw, + IXGBE_VFRDT(rxr->me), rxr->next_to_refresh); + return; +} + +/********************************************************************* + * + * Allocate memory for rx_buffer structures. Since we use one + * rx_buffer per received packet, the maximum number of rx_buffer's + * that we'll need is equal to the number of receive descriptors + * that we've allocated. 
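+ * Two DMA tags are created per ring: one sized for header mbufs
+ * (MSIZE) and one sized for payload clusters (MJUMPAGESIZE).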
+ * + **********************************************************************/ +static int +ixv_allocate_receive_buffers(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + device_t dev = adapter->dev; + struct ixv_rx_buf *rxbuf; + int i, bsize, error; + + bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc; + if (!(rxr->rx_buffers = + (struct ixv_rx_buf *) malloc(bsize, + M_DEVBUF, M_NOWAIT | M_ZERO))) { + device_printf(dev, "Unable to allocate rx_buffer memory\n"); + error = ENOMEM; + goto fail; + } + + if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MSIZE, /* maxsize */ + 1, /* nsegments */ + MSIZE, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &rxr->htag))) { + device_printf(dev, "Unable to create RX DMA tag\n"); + goto fail; + } + + if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ + 1, 0, /* alignment, bounds */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MJUMPAGESIZE, /* maxsize */ + 1, /* nsegments */ + MJUMPAGESIZE, /* maxsegsize */ + 0, /* flags */ + NULL, /* lockfunc */ + NULL, /* lockfuncarg */ + &rxr->ptag))) { + device_printf(dev, "Unable to create RX DMA tag\n"); + goto fail; + } + + for (i = 0; i < adapter->num_rx_desc; i++, rxbuf++) { + rxbuf = &rxr->rx_buffers[i]; + error = bus_dmamap_create(rxr->htag, + BUS_DMA_NOWAIT, &rxbuf->hmap); + if (error) { + device_printf(dev, "Unable to create RX head map\n"); + goto fail; + } + error = bus_dmamap_create(rxr->ptag, + BUS_DMA_NOWAIT, &rxbuf->pmap); + if (error) { + device_printf(dev, "Unable to create RX pkt map\n"); + goto fail; + } + } + + return (0); + +fail: + /* Frees all, but can handle partial completion */ + ixv_free_receive_structures(adapter); + return (error); +} + +static void +ixv_free_receive_ring(struct rx_ring *rxr) +{ + struct adapter *adapter; + struct ixv_rx_buf *rxbuf; + int i; + + adapter = rxr->adapter; + for (i = 0; i < adapter->num_rx_desc; i++) { + rxbuf = &rxr->rx_buffers[i]; + if (rxbuf->m_head != NULL) { + bus_dmamap_sync(rxr->htag, rxbuf->hmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->htag, rxbuf->hmap); + rxbuf->m_head->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_head); + } + if (rxbuf->m_pack != NULL) { + bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->ptag, rxbuf->pmap); + rxbuf->m_pack->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_pack); + } + rxbuf->m_head = NULL; + rxbuf->m_pack = NULL; + } +} + + +/********************************************************************* + * + * Initialize a receive ring and its buffers. + * + **********************************************************************/ +static int +ixv_setup_receive_ring(struct rx_ring *rxr) +{ + struct adapter *adapter; + struct ifnet *ifp; + device_t dev; + struct ixv_rx_buf *rxbuf; + bus_dma_segment_t pseg[1], hseg[1]; + struct lro_ctrl *lro = &rxr->lro; + int rsize, nsegs, error = 0; + + adapter = rxr->adapter; + ifp = adapter->ifp; + dev = adapter->dev; + + /* Clear the ring contents */ + IXV_RX_LOCK(rxr); + rsize = roundup2(adapter->num_rx_desc * + sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN); + bzero((void *)rxr->rx_base, rsize); + + /* Free current RX buffer structs and their mbufs */ + ixv_free_receive_ring(rxr); + + /* Configure header split? 
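+ * This follows the ixv_header_split setting; when it is off, only
+ * payload clusters are allocated in the loop below.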
*/ + if (ixv_header_split) + rxr->hdr_split = TRUE; + + /* Now replenish the mbufs */ + for (int j = 0; j != adapter->num_rx_desc; ++j) { + struct mbuf *mh, *mp; + + rxbuf = &rxr->rx_buffers[j]; + /* + ** Dont allocate mbufs if not + ** doing header split, its wasteful + */ + if (rxr->hdr_split == FALSE) + goto skip_head; + + /* First the header */ + rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA); + if (rxbuf->m_head == NULL) { + error = ENOBUFS; + goto fail; + } + m_adj(rxbuf->m_head, ETHER_ALIGN); + mh = rxbuf->m_head; + mh->m_len = mh->m_pkthdr.len = MHLEN; + mh->m_flags |= M_PKTHDR; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->htag, + rxbuf->hmap, rxbuf->m_head, hseg, + &nsegs, BUS_DMA_NOWAIT); + if (error != 0) /* Nothing elegant to do here */ + goto fail; + bus_dmamap_sync(rxr->htag, + rxbuf->hmap, BUS_DMASYNC_PREREAD); + /* Update descriptor */ + rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr); + +skip_head: + /* Now the payload cluster */ + rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA, + M_PKTHDR, adapter->rx_mbuf_sz); + if (rxbuf->m_pack == NULL) { + error = ENOBUFS; + goto fail; + } + mp = rxbuf->m_pack; + mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; + /* Get the memory mapping */ + error = bus_dmamap_load_mbuf_sg(rxr->ptag, + rxbuf->pmap, mp, pseg, + &nsegs, BUS_DMA_NOWAIT); + if (error != 0) + goto fail; + bus_dmamap_sync(rxr->ptag, + rxbuf->pmap, BUS_DMASYNC_PREREAD); + /* Update descriptor */ + rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr); + } + + + /* Setup our descriptor indices */ + rxr->next_to_check = 0; + rxr->next_to_refresh = 0; + rxr->lro_enabled = FALSE; + rxr->rx_split_packets = 0; + rxr->rx_bytes = 0; + rxr->discard = FALSE; + + bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* + ** Now set up the LRO interface: + */ + if (ifp->if_capenable & IFCAP_LRO) { + int err = tcp_lro_init(lro); + if (err) { + device_printf(dev, "LRO Initialization failed!\n"); + goto fail; + } + INIT_DEBUGOUT("RX Soft LRO Initialized\n"); + rxr->lro_enabled = TRUE; + lro->ifp = adapter->ifp; + } + + IXV_RX_UNLOCK(rxr); + return (0); + +fail: + ixv_free_receive_ring(rxr); + IXV_RX_UNLOCK(rxr); + return (error); +} + +/********************************************************************* + * + * Initialize all receive rings. + * + **********************************************************************/ +static int +ixv_setup_receive_structures(struct adapter *adapter) +{ + struct rx_ring *rxr = adapter->rx_rings; + int j; + + for (j = 0; j < adapter->num_queues; j++, rxr++) + if (ixv_setup_receive_ring(rxr)) + goto fail; + + return (0); +fail: + /* + * Free RX buffers allocated so far, we will only handle + * the rings that completed, the failing case will have + * cleaned up for itself. 'j' failed, so its the terminus. + */ + for (int i = 0; i < j; ++i) { + rxr = &adapter->rx_rings[i]; + ixv_free_receive_ring(rxr); + } + + return (ENOBUFS); +} + +/********************************************************************* + * + * Setup receive registers and features. 
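+ * Each queue is enabled first, then its descriptor base, length and
+ * SRRCTL buffer sizes are programmed, and finally the tail pointer
+ * is set to the last descriptor.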
+ * + **********************************************************************/ +#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 + +static void +ixv_initialize_receive_units(struct adapter *adapter) +{ + struct rx_ring *rxr = adapter->rx_rings; + struct ixgbe_hw *hw = &adapter->hw; + struct ifnet *ifp = adapter->ifp; + u32 bufsz, fctrl, rxcsum, hlreg; + + + /* Enable broadcasts */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; + fctrl |= IXGBE_FCTRL_PMCF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + /* Set for Jumbo Frames? */ + hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0); + if (ifp->if_mtu > ETHERMTU) { + hlreg |= IXGBE_HLREG0_JUMBOEN; + bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } else { + hlreg &= ~IXGBE_HLREG0_JUMBOEN; + bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; + } + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg); + + for (int i = 0; i < adapter->num_queues; i++, rxr++) { + u64 rdba = rxr->rxdma.dma_paddr; + u32 reg, rxdctl; + + /* Do the queue enabling first */ + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl); + for (int k = 0; k < 10; k++) { + if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) & + IXGBE_RXDCTL_ENABLE) + break; + else + msec_delay(1); + } + wmb(); + + /* Setup the Base and Length of the Rx Descriptor Ring */ + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i), + (rdba & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), + (rdba >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i), + adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc)); + + /* Set up the SRRCTL register */ + reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i)); + reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; + reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; + reg |= bufsz; + if (rxr->hdr_split) { + /* Use a standard mbuf for the header */ + reg |= ((IXV_RX_HDR << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) + & IXGBE_SRRCTL_BSIZEHDR_MASK); + reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else + reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg); + + /* Setup the HW Rx Head and Tail Descriptor Pointers */ + IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me), + adapter->num_rx_desc - 1); + } + + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + + if (ifp->if_capenable & IFCAP_RXCSUM) + rxcsum |= IXGBE_RXCSUM_PCSD; + + if (!(rxcsum & IXGBE_RXCSUM_PCSD)) + rxcsum |= IXGBE_RXCSUM_IPPCSE; + + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + return; +} + +/********************************************************************* + * + * Free all receive rings. 
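+ * Releases the per-ring buffers, any LRO state and the descriptor
+ * ring DMA memory, then frees the ring array itself.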
+ * + **********************************************************************/ +static void +ixv_free_receive_structures(struct adapter *adapter) +{ + struct rx_ring *rxr = adapter->rx_rings; + + for (int i = 0; i < adapter->num_queues; i++, rxr++) { + struct lro_ctrl *lro = &rxr->lro; + ixv_free_receive_buffers(rxr); + /* Free LRO memory */ + tcp_lro_free(lro); + /* Free the ring memory as well */ + ixv_dma_free(adapter, &rxr->rxdma); + } + + free(adapter->rx_rings, M_DEVBUF); +} + + +/********************************************************************* + * + * Free receive ring data structures + * + **********************************************************************/ +static void +ixv_free_receive_buffers(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + struct ixv_rx_buf *rxbuf; + + INIT_DEBUGOUT("free_receive_structures: begin"); + + /* Cleanup any existing buffers */ + if (rxr->rx_buffers != NULL) { + for (int i = 0; i < adapter->num_rx_desc; i++) { + rxbuf = &rxr->rx_buffers[i]; + if (rxbuf->m_head != NULL) { + bus_dmamap_sync(rxr->htag, rxbuf->hmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->htag, rxbuf->hmap); + rxbuf->m_head->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_head); + } + if (rxbuf->m_pack != NULL) { + bus_dmamap_sync(rxr->ptag, rxbuf->pmap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(rxr->ptag, rxbuf->pmap); + rxbuf->m_pack->m_flags |= M_PKTHDR; + m_freem(rxbuf->m_pack); + } + rxbuf->m_head = NULL; + rxbuf->m_pack = NULL; + if (rxbuf->hmap != NULL) { + bus_dmamap_destroy(rxr->htag, rxbuf->hmap); + rxbuf->hmap = NULL; + } + if (rxbuf->pmap != NULL) { + bus_dmamap_destroy(rxr->ptag, rxbuf->pmap); + rxbuf->pmap = NULL; + } + } + if (rxr->rx_buffers != NULL) { + free(rxr->rx_buffers, M_DEVBUF); + rxr->rx_buffers = NULL; + } + } + + if (rxr->htag != NULL) { + bus_dma_tag_destroy(rxr->htag); + rxr->htag = NULL; + } + if (rxr->ptag != NULL) { + bus_dma_tag_destroy(rxr->ptag); + rxr->ptag = NULL; + } + + return; +} + +static __inline void +ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype) +{ + + /* + * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet + * should be computed by hardware. Also it should not have VLAN tag in + * ethernet header. + */ + if (rxr->lro_enabled && + (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && + (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && + (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) == + (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) && + (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == + (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { + /* + * Send to the stack if: + ** - LRO not enabled, or + ** - no LRO resources, or + ** - lro enqueue fails + */ + if (rxr->lro.lro_cnt != 0) + if (tcp_lro_rx(&rxr->lro, m, 0) == 0) + return; + } + IXV_RX_UNLOCK(rxr); + (*ifp->if_input)(ifp, m); + IXV_RX_LOCK(rxr); +} + +static __inline void +ixv_rx_discard(struct rx_ring *rxr, int i) +{ + struct ixv_rx_buf *rbuf; + + rbuf = &rxr->rx_buffers[i]; + + if (rbuf->fmp != NULL) {/* Partial chain ? */ + rbuf->fmp->m_flags |= M_PKTHDR; + m_freem(rbuf->fmp); + rbuf->fmp = NULL; + } + + /* + ** With advanced descriptors the writeback + ** clobbers the buffer addrs, so its easier + ** to just free the existing mbufs and take + ** the normal refresh path to get new buffers + ** and mapping. 
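+ ** Any partially assembled chain (fmp) was already freed above.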
+ */ + if (rbuf->m_head) { + m_free(rbuf->m_head); + rbuf->m_head = NULL; + } + + if (rbuf->m_pack) { + m_free(rbuf->m_pack); + rbuf->m_pack = NULL; + } + + return; +} + + +/********************************************************************* + * + * This routine executes in interrupt context. It replenishes + * the mbufs in the descriptor and sends data which has been + * dma'ed into host memory to upper layer. + * + * We loop at most count times if count is > 0, or until done if + * count < 0. + * + * Return TRUE for more work, FALSE for all clean. + *********************************************************************/ +static bool +ixv_rxeof(struct ix_queue *que, int count) +{ + struct adapter *adapter = que->adapter; + struct rx_ring *rxr = que->rxr; + struct ifnet *ifp = adapter->ifp; + struct lro_ctrl *lro = &rxr->lro; + struct lro_entry *queued; + int i, nextp, processed = 0; + u32 staterr = 0; + union ixgbe_adv_rx_desc *cur; + struct ixv_rx_buf *rbuf, *nbuf; + + IXV_RX_LOCK(rxr); + + for (i = rxr->next_to_check; count != 0;) { + struct mbuf *sendmp, *mh, *mp; + u32 rsc, ptype; + u16 hlen, plen, hdr, vtag; + bool eop; + + /* Sync the ring. */ + bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); + + cur = &rxr->rx_base[i]; + staterr = le32toh(cur->wb.upper.status_error); + + if ((staterr & IXGBE_RXD_STAT_DD) == 0) + break; + if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) + break; + + count--; + sendmp = NULL; + nbuf = NULL; + rsc = 0; + cur->wb.upper.status_error = 0; + rbuf = &rxr->rx_buffers[i]; + mh = rbuf->m_head; + mp = rbuf->m_pack; + + plen = le16toh(cur->wb.upper.length); + ptype = le32toh(cur->wb.lower.lo_dword.data) & + IXGBE_RXDADV_PKTTYPE_MASK; + hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info); + vtag = le16toh(cur->wb.upper.vlan); + eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0); + + /* Make sure all parts of a bad packet are discarded */ + if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) || + (rxr->discard)) { + ifp->if_ierrors++; + rxr->rx_discarded++; + if (!eop) + rxr->discard = TRUE; + else + rxr->discard = FALSE; + ixv_rx_discard(rxr, i); + goto next_desc; + } + + if (!eop) { + nextp = i + 1; + if (nextp == adapter->num_rx_desc) + nextp = 0; + nbuf = &rxr->rx_buffers[nextp]; + prefetch(nbuf); + } + /* + ** The header mbuf is ONLY used when header + ** split is enabled, otherwise we get normal + ** behavior, ie, both header and payload + ** are DMA'd into the payload buffer. + ** + ** Rather than using the fmp/lmp global pointers + ** we now keep the head of a packet chain in the + ** buffer struct and pass this along from one + ** descriptor to the next, until we get EOP. + */ + if (rxr->hdr_split && (rbuf->fmp == NULL)) { + /* This must be an initial descriptor */ + hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >> + IXGBE_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > IXV_RX_HDR) + hlen = IXV_RX_HDR; + mh->m_len = hlen; + mh->m_flags |= M_PKTHDR; + mh->m_next = NULL; + mh->m_pkthdr.len = mh->m_len; + /* Null buf pointer so it is refreshed */ + rbuf->m_head = NULL; + /* + ** Check the payload length, this + ** could be zero if its a small + ** packet. + */ + if (plen > 0) { + mp->m_len = plen; + mp->m_next = NULL; + mp->m_flags &= ~M_PKTHDR; + mh->m_next = mp; + mh->m_pkthdr.len += mp->m_len; + /* Null buf pointer so it is refreshed */ + rbuf->m_pack = NULL; + rxr->rx_split_packets++; + } + /* + ** Now create the forward + ** chain so when complete + ** we wont have to. 
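+ ** The chain head travels in the next buffer's fmp pointer until
+ ** the descriptor carrying EOP is reached.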
+ */ + if (eop == 0) { + /* stash the chain head */ + nbuf->fmp = mh; + /* Make forward chain */ + if (plen) + mp->m_next = nbuf->m_pack; + else + mh->m_next = nbuf->m_pack; + } else { + /* Singlet, prepare to send */ + sendmp = mh; + if ((adapter->num_vlans) && + (staterr & IXGBE_RXD_STAT_VP)) { + sendmp->m_pkthdr.ether_vtag = vtag; + sendmp->m_flags |= M_VLANTAG; + } + } + } else { + /* + ** Either no header split, or a + ** secondary piece of a fragmented + ** split packet. + */ + mp->m_len = plen; + /* + ** See if there is a stored head + ** that determines what we are + */ + sendmp = rbuf->fmp; + rbuf->m_pack = rbuf->fmp = NULL; + + if (sendmp != NULL) /* secondary frag */ + sendmp->m_pkthdr.len += mp->m_len; + else { + /* first desc of a non-ps chain */ + sendmp = mp; + sendmp->m_flags |= M_PKTHDR; + sendmp->m_pkthdr.len = mp->m_len; + if (staterr & IXGBE_RXD_STAT_VP) { + sendmp->m_pkthdr.ether_vtag = vtag; + sendmp->m_flags |= M_VLANTAG; + } + } + /* Pass the head pointer on */ + if (eop == 0) { + nbuf->fmp = sendmp; + sendmp = NULL; + mp->m_next = nbuf->m_pack; + } + } + ++processed; + /* Sending this frame? */ + if (eop) { + sendmp->m_pkthdr.rcvif = ifp; + ifp->if_ipackets++; + rxr->rx_packets++; + /* capture data for AIM */ + rxr->bytes += sendmp->m_pkthdr.len; + rxr->rx_bytes += sendmp->m_pkthdr.len; + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) + ixv_rx_checksum(staterr, sendmp, ptype); +#if __FreeBSD_version >= 800000 + sendmp->m_pkthdr.flowid = que->msix; + sendmp->m_flags |= M_FLOWID; +#endif + } +next_desc: + bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + /* Advance our pointers to the next descriptor. */ + if (++i == adapter->num_rx_desc) + i = 0; + + /* Now send to the stack or do LRO */ + if (sendmp != NULL) + ixv_rx_input(rxr, ifp, sendmp, ptype); + + /* Every 8 descriptors we go to refresh mbufs */ + if (processed == 8) { + ixv_refresh_mbufs(rxr, i); + processed = 0; + } + } + + /* Refresh any remaining buf structs */ + if (ixv_rx_unrefreshed(rxr)) + ixv_refresh_mbufs(rxr, i); + + rxr->next_to_check = i; + + /* + * Flush any outstanding LRO work + */ + while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { + SLIST_REMOVE_HEAD(&lro->lro_active, next); + tcp_lro_flush(lro, queued); + } + + IXV_RX_UNLOCK(rxr); + + /* + ** We still have cleaning to do? + ** Schedule another interrupt if so. + */ + if ((staterr & IXGBE_RXD_STAT_DD) != 0) { + ixv_rearm_queues(adapter, (u64)(1 << que->msix)); + return (TRUE); + } + + return (FALSE); +} + + +/********************************************************************* + * + * Verify that the hardware indicated that the checksum is valid. + * Inform the stack about the status of checksum so that stack + * doesn't spend time verifying the checksum. 
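+ * The IPCS and L4CS status bits are only trusted when the matching
+ * IPE/TCPE error bits are clear.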
+ * + *********************************************************************/ +static void +ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype) +{ + u16 status = (u16) staterr; + u8 errors = (u8) (staterr >> 24); + bool sctp = FALSE; + + if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 && + (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0) + sctp = TRUE; + + if (status & IXGBE_RXD_STAT_IPCS) { + if (!(errors & IXGBE_RXD_ERR_IPE)) { + /* IP Checksum Good */ + mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; + mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; + + } else + mp->m_pkthdr.csum_flags = 0; + } + if (status & IXGBE_RXD_STAT_L4CS) { + u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); +#if __FreeBSD_version >= 800000 + if (sctp) + type = CSUM_SCTP_VALID; +#endif + if (!(errors & IXGBE_RXD_ERR_TCPE)) { + mp->m_pkthdr.csum_flags |= type; + if (!sctp) + mp->m_pkthdr.csum_data = htons(0xffff); + } + } + return; +} + +static void +ixv_setup_vlan_support(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ctrl, vid, vfta, retry; + + + /* + ** We get here thru init_locked, meaning + ** a soft reset, this has already cleared + ** the VFTA and other state, so if there + ** have been no vlan's registered do nothing. + */ + if (adapter->num_vlans == 0) + return; + + /* Enable the queues */ + for (int i = 0; i < adapter->num_queues; i++) { + ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl); + } + + /* + ** A soft reset zero's out the VFTA, so + ** we need to repopulate it now. + */ + for (int i = 0; i < VFTA_SIZE; i++) { + if (ixv_shadow_vfta[i] == 0) + continue; + vfta = ixv_shadow_vfta[i]; + /* + ** Reconstruct the vlan id's + ** based on the bits set in each + ** of the array ints. + */ + for ( int j = 0; j < 32; j++) { + retry = 0; + if ((vfta & (1 << j)) == 0) + continue; + vid = (i * 32) + j; + /* Call the shared code mailbox routine */ + while (ixgbe_set_vfta(hw, vid, 0, TRUE)) { + if (++retry > 5) + break; + } + } + } +} + +/* +** This routine is run via an vlan config EVENT, +** it enables us to use the HW Filter table since +** we can get the vlan id. This just creates the +** entry in the soft version of the VFTA, init will +** repopulate the real table. +*/ +static void +ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) +{ + struct adapter *adapter = ifp->if_softc; + u16 index, bit; + + if (ifp->if_softc != arg) /* Not our event */ + return; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + IXV_CORE_LOCK(adapter); + index = (vtag >> 5) & 0x7F; + bit = vtag & 0x1F; + ixv_shadow_vfta[index] |= (1 << bit); + ++adapter->num_vlans; + /* Re-init to load the changes */ + ixv_init_locked(adapter); + IXV_CORE_UNLOCK(adapter); +} + +/* +** This routine is run via an vlan +** unconfig EVENT, remove our entry +** in the soft vfta. 
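+** The hardware VFTA is rewritten by the re-init that follows.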
+*/ +static void +ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) +{ + struct adapter *adapter = ifp->if_softc; + u16 index, bit; + + if (ifp->if_softc != arg) + return; + + if ((vtag == 0) || (vtag > 4095)) /* Invalid */ + return; + + IXV_CORE_LOCK(adapter); + index = (vtag >> 5) & 0x7F; + bit = vtag & 0x1F; + ixv_shadow_vfta[index] &= ~(1 << bit); + --adapter->num_vlans; + /* Re-init to load the changes */ + ixv_init_locked(adapter); + IXV_CORE_UNLOCK(adapter); +} + +static void +ixv_enable_intr(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + struct ix_queue *que = adapter->queues; + u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); + + + IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); + + mask = IXGBE_EIMS_ENABLE_MASK; + mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); + IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); + + for (int i = 0; i < adapter->num_queues; i++, que++) + ixv_enable_queue(adapter, que->msix); + + IXGBE_WRITE_FLUSH(hw); + + return; +} + +static void +ixv_disable_intr(struct adapter *adapter) +{ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0); + IXGBE_WRITE_FLUSH(&adapter->hw); + return; +} + +/* +** Setup the correct IVAR register for a particular MSIX interrupt +** - entry is the register array entry +** - vector is the MSIX vector for this queue +** - type is RX/TX/MISC +*/ +static void +ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 ivar, index; + + vector |= IXGBE_IVAR_ALLOC_VAL; + + if (type == -1) { /* MISC IVAR */ + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); + ivar &= ~0xFF; + ivar |= vector; + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); + } else { /* RX/TX IVARS */ + index = (16 * (entry & 1)) + (8 * type); + ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1)); + ivar &= ~(0xFF << index); + ivar |= (vector << index); + IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar); + } +} + +static void +ixv_configure_ivars(struct adapter *adapter) +{ + struct ix_queue *que = adapter->queues; + + for (int i = 0; i < adapter->num_queues; i++, que++) { + /* First the RX queue entry */ + ixv_set_ivar(adapter, i, que->msix, 0); + /* ... and the TX */ + ixv_set_ivar(adapter, i, que->msix, 1); + /* Set an initial value in EITR */ + IXGBE_WRITE_REG(&adapter->hw, + IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT); + } + + /* For the Link interrupt */ + ixv_set_ivar(adapter, 1, adapter->mbxvec, -1); +} + + +/* +** Tasklet handler for MSIX MBX interrupts +** - do outside interrupt since it might sleep +*/ +static void +ixv_handle_mbx(void *context, int pending) +{ + struct adapter *adapter = context; + + ixgbe_check_link(&adapter->hw, + &adapter->link_speed, &adapter->link_up, 0); + ixv_update_link_status(adapter); +} + +/* +** The VF stats registers never have a truely virgin +** starting point, so this routine tries to make an +** artificial one, marking ground zero on attach as +** it were. 
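+** Deltas against the recorded base values are folded into the
+** saved_reset_* counters below.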
+*/ +static void +ixv_save_stats(struct adapter *adapter) +{ + if (adapter->stats.vfgprc || adapter->stats.vfgptc) { + adapter->stats.saved_reset_vfgprc += + adapter->stats.vfgprc - adapter->stats.base_vfgprc; + adapter->stats.saved_reset_vfgptc += + adapter->stats.vfgptc - adapter->stats.base_vfgptc; + adapter->stats.saved_reset_vfgorc += + adapter->stats.vfgorc - adapter->stats.base_vfgorc; + adapter->stats.saved_reset_vfgotc += + adapter->stats.vfgotc - adapter->stats.base_vfgotc; + adapter->stats.saved_reset_vfmprc += + adapter->stats.vfmprc - adapter->stats.base_vfmprc; + } +} + +static void +ixv_init_stats(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); + adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); + adapter->stats.last_vfgorc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); + + adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); + adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); + adapter->stats.last_vfgotc |= + (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); + + adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); + + adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; + adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; + adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; + adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; + adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; +} + +#define UPDATE_STAT_32(reg, last, count) \ +{ \ + u32 current = IXGBE_READ_REG(hw, reg); \ + if (current < last) \ + count += 0x100000000LL; \ + last = current; \ + count &= 0xFFFFFFFF00000000LL; \ + count |= current; \ +} + +#define UPDATE_STAT_36(lsb, msb, last, count) \ +{ \ + u64 cur_lsb = IXGBE_READ_REG(hw, lsb); \ + u64 cur_msb = IXGBE_READ_REG(hw, msb); \ + u64 current = ((cur_msb << 32) | cur_lsb); \ + if (current < last) \ + count += 0x1000000000LL; \ + last = current; \ + count &= 0xFFFFFFF000000000LL; \ + count |= current; \ +} + +/* +** ixv_update_stats - Update the board statistics counters. +*/ +void +ixv_update_stats(struct adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + + UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc, + adapter->stats.vfgprc); + UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc, + adapter->stats.vfgptc); + UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, + adapter->stats.last_vfgorc, adapter->stats.vfgorc); + UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, + adapter->stats.last_vfgotc, adapter->stats.vfgotc); + UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc, + adapter->stats.vfmprc); +} + +/********************************************************************** + * + * This routine is called only when ixgbe_display_debug_stats is enabled. + * This routine provides a way to take a look at important statistics + * maintained by the driver and hardware. 
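+ * It is invoked from ixv_sysctl_stats() when that sysctl is written
+ * with the value 1.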
+ * + **********************************************************************/ +static void +ixv_print_hw_stats(struct adapter * adapter) +{ + device_t dev = adapter->dev; + + device_printf(dev,"Std Mbuf Failed = %lu\n", + adapter->mbuf_defrag_failed); + device_printf(dev,"Driver dropped packets = %lu\n", + adapter->dropped_pkts); + device_printf(dev, "watchdog timeouts = %ld\n", + adapter->watchdog_events); + + device_printf(dev,"Good Packets Rcvd = %llu\n", + (long long)adapter->stats.vfgprc); + device_printf(dev,"Good Packets Xmtd = %llu\n", + (long long)adapter->stats.vfgptc); + device_printf(dev,"TSO Transmissions = %lu\n", + adapter->tso_tx); + +} + +/********************************************************************** + * + * This routine is called only when em_display_debug_stats is enabled. + * This routine provides a way to take a look at important statistics + * maintained by the driver and hardware. + * + **********************************************************************/ +static void +ixv_print_debug_info(struct adapter *adapter) +{ + device_t dev = adapter->dev; + struct ixgbe_hw *hw = &adapter->hw; + struct ix_queue *que = adapter->queues; + struct rx_ring *rxr; + struct tx_ring *txr; + struct lro_ctrl *lro; + + device_printf(dev,"Error Byte Count = %u \n", + IXGBE_READ_REG(hw, IXGBE_ERRBC)); + + for (int i = 0; i < adapter->num_queues; i++, que++) { + txr = que->txr; + rxr = que->rxr; + lro = &rxr->lro; + device_printf(dev,"QUE(%d) IRQs Handled: %lu\n", + que->msix, (long)que->irqs); + device_printf(dev,"RX(%d) Packets Received: %lld\n", + rxr->me, (long long)rxr->rx_packets); + device_printf(dev,"RX(%d) Split RX Packets: %lld\n", + rxr->me, (long long)rxr->rx_split_packets); + device_printf(dev,"RX(%d) Bytes Received: %lu\n", + rxr->me, (long)rxr->rx_bytes); + device_printf(dev,"RX(%d) LRO Queued= %d\n", + rxr->me, lro->lro_queued); + device_printf(dev,"RX(%d) LRO Flushed= %d\n", + rxr->me, lro->lro_flushed); + device_printf(dev,"TX(%d) Packets Sent: %lu\n", + txr->me, (long)txr->total_packets); + device_printf(dev,"TX(%d) NO Desc Avail: %lu\n", + txr->me, (long)txr->no_desc_avail); + } + + device_printf(dev,"MBX IRQ Handled: %lu\n", + (long)adapter->mbx_irq); + return; +} + +static int +ixv_sysctl_stats(SYSCTL_HANDLER_ARGS) +{ + int error; + int result; + struct adapter *adapter; + + result = -1; + error = sysctl_handle_int(oidp, &result, 0, req); + + if (error || !req->newptr) + return (error); + + if (result == 1) { + adapter = (struct adapter *) arg1; + ixv_print_hw_stats(adapter); + } + return error; +} + +static int +ixv_sysctl_debug(SYSCTL_HANDLER_ARGS) +{ + int error, result; + struct adapter *adapter; + + result = -1; + error = sysctl_handle_int(oidp, &result, 0, req); + + if (error || !req->newptr) + return (error); + + if (result == 1) { + adapter = (struct adapter *) arg1; + ixv_print_debug_info(adapter); + } + return error; +} + +/* +** Set flow control using sysctl: +** Flow control values: +** 0 - off +** 1 - rx pause +** 2 - tx pause +** 3 - full +*/ +static int +ixv_set_flowcntl(SYSCTL_HANDLER_ARGS) +{ + int error; + struct adapter *adapter; + + error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req); + + if (error) + return (error); + + adapter = (struct adapter *) arg1; + switch (ixv_flow_control) { + case ixgbe_fc_rx_pause: + case ixgbe_fc_tx_pause: + case ixgbe_fc_full: + adapter->hw.fc.requested_mode = ixv_flow_control; + break; + case ixgbe_fc_none: + default: + adapter->hw.fc.requested_mode = ixgbe_fc_none; + } + + 
ixgbe_fc_enable(&adapter->hw, 0); + return error; +} + +static void +ixv_add_rx_process_limit(struct adapter *adapter, const char *name, + const char *description, int *limit, int value) +{ + *limit = value; + SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), + SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), + OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); +} + diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixv.h b/lib/librte_pmd_ixgbe/ixgbe/ixv.h new file mode 100644 index 0000000000..fcd0e1daad --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe/ixv.h @@ -0,0 +1,430 @@ +/****************************************************************************** + + Copyright (c) 2001-2010, Intel Corporation + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + 3. Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + +******************************************************************************/ +/*$FreeBSD$*/ + + +#ifndef _IXV_H_ +#define _IXV_H_ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe_api.h" +#include "ixgbe_vf.h" + +/* Tunables */ + +/* + * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the + * number of transmit descriptors allocated by the driver. Increasing this + * value allows the driver to queue more transmits. Each descriptor is 16 + * bytes. Performance tests have show the 2K value to be optimal for top + * performance. + */ +#define DEFAULT_TXD 1024 +#define PERFORM_TXD 2048 +#define MAX_TXD 4096 +#define MIN_TXD 64 + +/* + * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the + * number of receive descriptors allocated for each RX queue. Increasing this + * value allows the driver to buffer more incoming packets. Each descriptor + * is 16 bytes. 
A receive buffer is also allocated for each descriptor. + * + * Note: with 8 rings and a dual port card, it is possible to bump up + * against the system mbuf pool limit, you can tune nmbclusters + * to adjust for this. + */ +#define DEFAULT_RXD 1024 +#define PERFORM_RXD 2048 +#define MAX_RXD 4096 +#define MIN_RXD 64 + +/* Alignment for rings */ +#define DBA_ALIGN 128 + +/* + * This parameter controls the maximum no of times the driver will loop in + * the isr. Minimum Value = 1 + */ +#define MAX_LOOP 10 + +/* + * This is the max watchdog interval, ie. the time that can + * pass between any two TX clean operations, such only happening + * when the TX hardware is functioning. + */ +#define IXV_WATCHDOG (10 * hz) + +/* + * This parameters control when the driver calls the routine to reclaim + * transmit descriptors. + */ +#define IXV_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8) +#define IXV_TX_OP_THRESHOLD (adapter->num_tx_desc / 32) + +#define IXV_MAX_FRAME_SIZE 0x3F00 + +/* Flow control constants */ +#define IXV_FC_PAUSE 0xFFFF +#define IXV_FC_HI 0x20000 +#define IXV_FC_LO 0x10000 + +/* Defines for printing debug information */ +#define DEBUG_INIT 0 +#define DEBUG_IOCTL 0 +#define DEBUG_HW 0 + +#define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") +#define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) +#define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) +#define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") +#define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) +#define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) +#define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") +#define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) +#define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) + +#define MAX_NUM_MULTICAST_ADDRESSES 128 +#define IXV_EITR_DEFAULT 128 +#define IXV_SCATTER 32 +#define IXV_RX_HDR 128 +#define MSIX_BAR 3 +#define IXV_TSO_SIZE 65535 +#define IXV_BR_SIZE 4096 +#define IXV_LINK_ITR 2000 +#define TX_BUFFER_SIZE ((u32) 1514) +#define VFTA_SIZE 128 + +/* Offload bits in mbuf flag */ +#define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) + +/* + ***************************************************************************** + * vendor_info_array + * + * This array contains the list of Subvendor/Subdevice IDs on which the driver + * should load. + * + ***************************************************************************** + */ +typedef struct _ixv_vendor_info_t { + unsigned int vendor_id; + unsigned int device_id; + unsigned int subvendor_id; + unsigned int subdevice_id; + unsigned int index; +} ixv_vendor_info_t; + + +struct ixv_tx_buf { + u32 eop_index; + struct mbuf *m_head; + bus_dmamap_t map; +}; + +struct ixv_rx_buf { + struct mbuf *m_head; + struct mbuf *m_pack; + struct mbuf *fmp; + bus_dmamap_t hmap; + bus_dmamap_t pmap; +}; + +/* + * Bus dma allocation structure used by ixv_dma_malloc and ixv_dma_free. + */ +struct ixv_dma_alloc { + bus_addr_t dma_paddr; + caddr_t dma_vaddr; + bus_dma_tag_t dma_tag; + bus_dmamap_t dma_map; + bus_dma_segment_t dma_seg; + bus_size_t dma_size; + int dma_nseg; +}; + +/* +** Driver queue struct: this is the interrupt container +** for the associated tx and rx ring. 
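+** Each instance carries its MSIX vector plus a task and taskqueue
+** used to defer work from the interrupt handler.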
+*/ +struct ix_queue { + struct adapter *adapter; + u32 msix; /* This queue's MSIX vector */ + u32 eims; /* This queue's EIMS bit */ + u32 eitr_setting; + u32 eitr; /* cached reg */ + struct resource *res; + void *tag; + struct tx_ring *txr; + struct rx_ring *rxr; + struct task que_task; + struct taskqueue *tq; + u64 irqs; +}; + +/* + * The transmit ring, one per queue + */ +struct tx_ring { + struct adapter *adapter; + struct mtx tx_mtx; + u32 me; + bool watchdog_check; + int watchdog_time; + union ixgbe_adv_tx_desc *tx_base; + struct ixv_dma_alloc txdma; + u32 next_avail_desc; + u32 next_to_clean; + struct ixv_tx_buf *tx_buffers; + volatile u16 tx_avail; + u32 txd_cmd; + bus_dma_tag_t txtag; + char mtx_name[16]; + struct buf_ring *br; + /* Soft Stats */ + u32 bytes; + u32 packets; + u64 no_desc_avail; + u64 total_packets; +}; + + +/* + * The Receive ring, one per rx queue + */ +struct rx_ring { + struct adapter *adapter; + struct mtx rx_mtx; + u32 me; + union ixgbe_adv_rx_desc *rx_base; + struct ixv_dma_alloc rxdma; + struct lro_ctrl lro; + bool lro_enabled; + bool hdr_split; + bool discard; + u32 next_to_refresh; + u32 next_to_check; + char mtx_name[16]; + struct ixv_rx_buf *rx_buffers; + bus_dma_tag_t htag; + bus_dma_tag_t ptag; + + u32 bytes; /* Used for AIM calc */ + u32 packets; + + /* Soft stats */ + u64 rx_irq; + u64 rx_split_packets; + u64 rx_packets; + u64 rx_bytes; + u64 rx_discarded; +}; + +/* Our adapter structure */ +struct adapter { + struct ifnet *ifp; + struct ixgbe_hw hw; + + struct ixgbe_osdep osdep; + struct device *dev; + + struct resource *pci_mem; + struct resource *msix_mem; + + /* + * Interrupt resources: this set is + * either used for legacy, or for Link + * when doing MSIX + */ + void *tag; + struct resource *res; + + struct ifmedia media; + struct callout timer; + int msix; + int if_flags; + + struct mtx core_mtx; + + eventhandler_tag vlan_attach; + eventhandler_tag vlan_detach; + + u16 num_vlans; + u16 num_queues; + + /* Info about the board itself */ + bool link_active; + u16 max_frame_size; + u32 link_speed; + bool link_up; + u32 mbxvec; + + /* Mbuf cluster size */ + u32 rx_mbuf_sz; + + /* Support for pluggable optics */ + struct task mbx_task; /* Mailbox tasklet */ + struct taskqueue *tq; + + /* + ** Queues: + ** This is the irq holder, it has + ** and RX/TX pair or rings associated + ** with it. + */ + struct ix_queue *queues; + + /* + * Transmit rings: + * Allocated at run time, an array of rings. + */ + struct tx_ring *tx_rings; + int num_tx_desc; + + /* + * Receive rings: + * Allocated at run time, an array of rings. 
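+ * One entry per queue, mirroring tx_rings above.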
+ */ + struct rx_ring *rx_rings; + int num_rx_desc; + u64 que_mask; + u32 rx_process_limit; + + /* Misc stats maintained by the driver */ + unsigned long dropped_pkts; + unsigned long mbuf_defrag_failed; + unsigned long mbuf_header_failed; + unsigned long mbuf_packet_failed; + unsigned long no_tx_map_avail; + unsigned long no_tx_dma_setup; + unsigned long watchdog_events; + unsigned long tso_tx; + unsigned long mbx_irq; + + struct ixgbevf_hw_stats stats; +}; + + +#define IXV_CORE_LOCK_INIT(_sc, _name) \ + mtx_init(&(_sc)->core_mtx, _name, "IXV Core Lock", MTX_DEF) +#define IXV_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) +#define IXV_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) +#define IXV_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) +#define IXV_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) +#define IXV_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) +#define IXV_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) +#define IXV_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) +#define IXV_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) +#define IXV_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) +#define IXV_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) +#define IXV_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) +#define IXV_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) + +/* Workaround to make 8.0 buildable */ +#if __FreeBSD_version < 800504 +static __inline int +drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br) +{ +#ifdef ALTQ + if (ALTQ_IS_ENABLED(&ifp->if_snd)) + return (1); +#endif + return (!buf_ring_empty(br)); +} +#endif + +/* +** Find the number of unrefreshed RX descriptors +*/ +static inline u16 +ixv_rx_unrefreshed(struct rx_ring *rxr) +{ + struct adapter *adapter = rxr->adapter; + + if (rxr->next_to_check > rxr->next_to_refresh) + return (rxr->next_to_check - rxr->next_to_refresh - 1); + else + return ((adapter->num_rx_desc + rxr->next_to_check) - + rxr->next_to_refresh - 1); +} + +#endif /* _IXV_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c new file mode 100644 index 0000000000..f3b3cdad44 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c @@ -0,0 +1,1609 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe_logs.h" +#include "ixgbe/ixgbe_api.h" +#include "ixgbe/ixgbe_vf.h" +#include "ixgbe/ixgbe_common.h" +#include "ixgbe_ethdev.h" + +/* + * High threshold controlling when to start sending XOFF frames. Must be at + * least 8 bytes less than receive packet buffer size. This value is in units + * of 1024 bytes. + */ +#define IXGBE_FC_HI 0x80 + +/* + * Low threshold controlling when to start sending XON frames. This value is + * in units of 1024 bytes. + */ +#define IXGBE_FC_LO 0x40 + +/* Timer value included in XOFF frames. */ +#define IXGBE_FC_PAUSE 0x680 + +#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */ +#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */ + +static int eth_ixgbe_dev_init(struct eth_driver *eth_drv, + struct rte_eth_dev *eth_dev); +static int ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, + uint16_t nb_tx_q); +static int ixgbe_dev_start(struct rte_eth_dev *dev); +static void ixgbe_dev_stop(struct rte_eth_dev *dev); +static void ixgbe_dev_close(struct rte_eth_dev *dev); +static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev); +static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev); +static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev); +static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev); +static int ixgbe_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete); +static void ixgbe_dev_stats_get(struct rte_eth_dev *dev, + struct rte_eth_stats *stats); +static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev); +static void ixgbe_dev_info_get(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info); +static void ixgbe_vlan_filter_set(struct rte_eth_dev *dev, + uint16_t vlan_id, + int on); +static int ixgbe_dev_led_on(struct rte_eth_dev *dev); +static int ixgbe_dev_led_off(struct rte_eth_dev *dev); +static int ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf); +static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev); +static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev); +static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev); +static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev); +static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, + void *param); +static void ixgbe_dev_interrupt_delayed_handler(void *param); +static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t pool); +static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); + +/* For Virtual Function support */ +static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv, + struct rte_eth_dev *eth_dev); +static int ixgbevf_dev_configure(struct 
rte_eth_dev *dev, uint16_t nb_rx_q, + uint16_t nb_tx_q); +static int ixgbevf_dev_start(struct rte_eth_dev *dev); +static void ixgbevf_dev_stop(struct rte_eth_dev *dev); +static void ixgbevf_intr_disable(struct ixgbe_hw *hw); +static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); +static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev); + +/* + * * Define VF Stats MACRO for Non "cleared on read" register + * */ +#define UPDATE_VF_STAT(reg, last, cur) \ +{ \ + u32 latest = IXGBE_READ_REG(hw, reg); \ + cur += latest - last; \ + last = latest; \ +} + +#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur) \ +{ \ + u64 new_lsb = IXGBE_READ_REG(hw, lsb); \ + u64 new_msb = IXGBE_READ_REG(hw, msb); \ + u64 latest = ((new_msb << 32) | new_lsb); \ + cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \ + last = latest; \ +} + +/* + * The set of PCI devices this driver supports + */ +static struct rte_pci_id pci_id_ixgbe_map[] = { + +#undef RTE_LIBRTE_IGB_PMD +#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + +{ .vendor_id = 0, /* sentinel */ }, +}; + + +/* + * The set of PCI devices this driver supports (for 82599 VF) + */ +static struct rte_pci_id pci_id_ixgbevf_map[] = { +{ + .vendor_id = PCI_VENDOR_ID_INTEL, + .device_id = IXGBE_DEV_ID_82599_VF, + .subsystem_vendor_id = PCI_ANY_ID, + .subsystem_device_id = PCI_ANY_ID, +}, +{ .vendor_id = 0, /* sentinel */ }, +}; + +static struct eth_dev_ops ixgbe_eth_dev_ops = { + .dev_configure = ixgbe_dev_configure, + .dev_start = ixgbe_dev_start, + .dev_stop = ixgbe_dev_stop, + .dev_close = ixgbe_dev_close, + .promiscuous_enable = ixgbe_dev_promiscuous_enable, + .promiscuous_disable = ixgbe_dev_promiscuous_disable, + .allmulticast_enable = ixgbe_dev_allmulticast_enable, + .allmulticast_disable = ixgbe_dev_allmulticast_disable, + .link_update = ixgbe_dev_link_update, + .stats_get = ixgbe_dev_stats_get, + .stats_reset = ixgbe_dev_stats_reset, + .dev_infos_get = ixgbe_dev_info_get, + .vlan_filter_set = ixgbe_vlan_filter_set, + .rx_queue_setup = ixgbe_dev_rx_queue_setup, + .tx_queue_setup = ixgbe_dev_tx_queue_setup, + .dev_led_on = ixgbe_dev_led_on, + .dev_led_off = ixgbe_dev_led_off, + .flow_ctrl_set = ixgbe_flow_ctrl_set, + .mac_addr_add = ixgbe_add_rar, + .mac_addr_remove = ixgbe_remove_rar, + .fdir_add_signature_filter = ixgbe_fdir_add_signature_filter, + .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter, + .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter, + .fdir_infos_get = ixgbe_fdir_info_get, + .fdir_add_perfect_filter = ixgbe_fdir_add_perfect_filter, + .fdir_update_perfect_filter = ixgbe_fdir_update_perfect_filter, + .fdir_remove_perfect_filter = ixgbe_fdir_remove_perfect_filter, + .fdir_set_masks = ixgbe_fdir_set_masks, +}; + +/* + * dev_ops for virtual function, bare necessities for basic vf + * operation have been implemented + */ +static struct eth_dev_ops ixgbevf_eth_dev_ops = { + + .dev_configure = ixgbevf_dev_configure, + .dev_start = ixgbevf_dev_start, + .dev_stop = ixgbevf_dev_stop, + .link_update = ixgbe_dev_link_update, + .stats_get = ixgbevf_dev_stats_get, + .stats_reset = ixgbevf_dev_stats_reset, + .dev_close = ixgbevf_dev_stop, + + .dev_infos_get = ixgbe_dev_info_get, + .rx_queue_setup = ixgbe_dev_rx_queue_setup, + .tx_queue_setup = ixgbe_dev_tx_queue_setup, +}; + +/** + * Atomically reads the link status information from global + * structure rte_eth_dev. 
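+ * The whole 8-byte link word is copied with rte_atomic64_cmpset() so
+ * the caller never sees a partially updated value.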
+ * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = link; + struct rte_eth_link *src = &(dev->data->dev_link); + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/** + * Atomically writes the link status information into global + * structure rte_eth_dev. + * + * @param dev + * - Pointer to the structure rte_eth_dev to read from. + * - Pointer to the buffer to be saved with the link status. + * + * @return + * - On success, zero. + * - On failure, negative value. + */ +static inline int +rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &(dev->data->dev_link); + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +/* + * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h. + */ +static inline int +ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->phy.type) { + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_ftl: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: + case ixgbe_phy_sfp_passive_tyco: + case ixgbe_phy_sfp_passive_unknown: + return 1; + default: + return 0; + } +} + +/* + * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h. + */ +static void +ixgbe_disable_intr(struct ixgbe_hw *hw) +{ + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type == ixgbe_mac_82598EB) { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0); + } else { + IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0); + IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0); + } + IXGBE_WRITE_FLUSH(hw); +} + +/* + * This function resets queue statistics mapping registers. + * From Niantic datasheet, Initialization of Statistics section: + * "...if software requires the queue counters, the RQSMR and TQSM registers + * must be re-programmed following a device reset. + */ +static void +ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw) +{ + uint32_t i; + for(i = 0; i != 16; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0); + } +} + +/* + * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c. + * It returns 0 on success. + */ +static int +eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, + struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + struct ixgbe_vfta * shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); + uint32_t ctrl_ext; + uint16_t csum; + int diag, i; + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = &ixgbe_eth_dev_ops; + eth_dev->rx_pkt_burst = &ixgbe_recv_pkts; + eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts; + + /* for secondary processes, we don't initialise any further as primary + * has already done this work. 
Only check we don't need a different + * RX function */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + if (eth_dev->data->scattered_rx) + eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts; + return 0; + } + pci_dev = eth_dev->pci_dev; + + /* Vendor and Device ID need to be set before init of shared code */ + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr = (void *)pci_dev->mem_resource.addr; + + /* Initialize the shared code */ + diag = ixgbe_init_shared_code(hw); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag); + return -EIO; + } + + /* Get Hardware Flow Control setting */ + hw->fc.requested_mode = ixgbe_fc_full; + hw->fc.current_mode = ixgbe_fc_full; + hw->fc.pause_time = IXGBE_FC_PAUSE; + hw->fc.low_water = IXGBE_FC_LO; + for (i = 0; i < MAX_TRAFFIC_CLASS; i++) + hw->fc.high_water[i] = IXGBE_FC_HI; + hw->fc.send_xon = 1; + + ixgbe_disable_intr(hw); + + /* Make sure we have a good EEPROM before we read from it */ + diag = ixgbe_validate_eeprom_checksum(hw, &csum); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag); + return -EIO; + } + + diag = ixgbe_init_hw(hw); + + /* + * Devices with copper phys will fail to initialise if ixgbe_init_hw() + * is called too soon after the kernel driver unbinding/binding occurs. + * The failure occurs in ixgbe_identify_phy_generic() for all devices, + * but for non-copper devices, ixgbe_identify_sfp_module_generic() is + * also called. See ixgbe_identify_phy_82599(). The reason for the + * failure is not known, and only occurs when virtualisation features + * are disabled in the BIOS. A delay of 100ms was found to be enough by + * trial-and-error, and is doubled to be safe. + */ + if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) { + rte_delay_ms(200); + diag = ixgbe_init_hw(hw); + } + + if (diag == IXGBE_ERR_EEPROM_VERSION) { + PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" + "LOM. 
Please be aware there may be issues associated " + "with your hardware.\n If you are experiencing problems " + "please contact your Intel or hardware representative " + "who provided you with this hardware.\n"); + } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) + PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n"); + if (diag) { + PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag); + return -EIO; + } + + /* pick up the PCI bus settings for reporting later */ + ixgbe_get_bus_info(hw); + + /* reset mappings for queue statistics hw counters */ + ixgbe_reset_qstat_mappings(hw); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * + hw->mac.num_rar_entries, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC addresses", + ETHER_ADDR_LEN * hw->mac.num_rar_entries); + return -ENOMEM; + } + /* Copy the permanent MAC address */ + ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, + &eth_dev->data->mac_addrs[0]); + + /* initialize the vfta */ + memset(shadow_vfta, 0, sizeof(*shadow_vfta)); + + /* let hardware know driver is loaded */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD; + IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); + + if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) + PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d, SFP+: %d\n", + (int) hw->mac.type, (int) hw->phy.type, + (int) hw->phy.sfp_type); + else + PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n", + (int) hw->mac.type, (int) hw->phy.type); + + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + rte_intr_callback_register(&(pci_dev->intr_handle), + ixgbe_dev_interrupt_handler, (void *)eth_dev); + + return 0; +} + +/* + * Virtual Function device init + */ +static int +eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv, + struct rte_eth_dev *eth_dev) +{ + struct rte_pci_device *pci_dev; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); + int diag; + + PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init"); + + eth_dev->dev_ops = &ixgbevf_eth_dev_ops; + pci_dev = eth_dev->pci_dev; + + hw->device_id = pci_dev->id.device_id; + hw->vendor_id = pci_dev->id.vendor_id; + hw->hw_addr = (void *)pci_dev->mem_resource.addr; + + /* Initialize the shared code */ + diag = ixgbe_init_shared_code(hw); + if (diag != IXGBE_SUCCESS) { + PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag); + return -EIO; + } + + /* init_mailbox_params */ + hw->mbx.ops.init_params(hw); + + /* Disable the interrupts for VF */ + ixgbevf_intr_disable(hw); + + hw->mac.num_rar_entries = hw->mac.max_rx_queues; + diag = hw->mac.ops.reset_hw(hw); + + /* Allocate memory for storing MAC addresses */ + eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN * + hw->mac.num_rar_entries, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, + "Failed to allocate %d bytes needed to store MAC addresses", + ETHER_ADDR_LEN * hw->mac.num_rar_entries); + return -ENOMEM; + } + /* Copy the permanent MAC address */ + ether_addr_copy((struct ether_addr *) hw->mac.perm_addr, + &eth_dev->data->mac_addrs[0]); + + /* reset the hardware with the new settings */ + diag = hw->mac.ops.start_hw(hw); + switch (diag) { + case 0: + break; + + default: + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); + return (diag); + } + + PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x 
deviceID=0x%x mac.type=%s\n", + eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id, + "ixgbe_mac_82599_vf"); + + return 0; +} + +static struct eth_driver rte_ixgbe_pmd = { + { + .name = "rte_ixgbe_pmd", + .id_table = pci_id_ixgbe_map, + .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO, + }, + .eth_dev_init = eth_ixgbe_dev_init, + .dev_private_size = sizeof(struct ixgbe_adapter), +}; + +/* + * virtual function driver struct + */ +static struct eth_driver rte_ixgbevf_pmd = { + { + .name = "rte_ixgbevf_pmd", + .id_table = pci_id_ixgbevf_map, + .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO, + }, + .eth_dev_init = eth_ixgbevf_dev_init, + .dev_private_size = sizeof(struct ixgbe_adapter), +}; + +/* + * Driver initialization routine. + * Invoked once at EAL init time. + * Register itself as the [Poll Mode] Driver of PCI IXGBE devices. + */ +int +rte_ixgbe_pmd_init(void) +{ + PMD_INIT_FUNC_TRACE(); + + rte_eth_driver_register(&rte_ixgbe_pmd); + return 0; +} + +/* + * VF Driver initialization routine. + * Invoked one at EAL init time. + * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices. + */ +int +rte_ixgbevf_pmd_init(void) +{ + DEBUGFUNC("rte_ixgbevf_pmd_init"); + + rte_eth_driver_register(&rte_ixgbevf_pmd); + return (0); +} + +static void +ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta * shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vfta; + uint32_t vid_idx; + uint32_t vid_bit; + + vid_idx = (uint32_t) ((vlan_id >> 5) & 0x7F); + vid_bit = (uint32_t) (1 << (vlan_id & 0x1F)); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid_idx)); + if (on) + vfta |= vid_bit; + else + vfta &= ~vid_bit; + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid_idx), vfta); + + /* update local VFTA copy */ + shadow_vfta->vfta[vid_idx] = vfta; +} + +static void +ixgbe_vlan_hw_support_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t vlnctrl; + uint32_t rxdctl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + /* Filter Table Disable */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_VFE; + + if (hw->mac.type == ixgbe_mac_82598EB) + vlnctrl &= ~IXGBE_VLNCTRL_VME; + else { + /* On 82599 the VLAN enable is per/queue in RXDCTL */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + rxdctl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl); + } + } + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +} + +static void +ixgbe_vlan_hw_support_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_vfta * shadow_vfta = + IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); + uint32_t vlnctrl; + uint32_t rxdctl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + + /* Filter Table Enable */ + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlnctrl &= ~IXGBE_VLNCTRL_CFIEN; + vlnctrl |= IXGBE_VLNCTRL_VFE; + + if (hw->mac.type == ixgbe_mac_82598EB) + vlnctrl |= IXGBE_VLNCTRL_VME; + else { + /* On 82599 the VLAN enable is per/queue in RXDCTL */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + rxdctl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl); + } + } + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + + /* write whatever is in local vfta copy */ + for (i = 0; i < IXGBE_VFTA_SIZE; i++) + 
IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]); +} + +static int +ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + int diag; + + PMD_INIT_FUNC_TRACE(); + + /* Allocate the array of pointers to RX queue structures */ + diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q); + if (diag != 0) { + PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d" + "pointers to RX queues failed", dev->data->port_id, + nb_rx_q); + return diag; + } + + /* Allocate the array of pointers to TX queue structures */ + diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q); + if (diag != 0) { + PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d" + "pointers to TX queues failed", dev->data->port_id, + nb_tx_q); + return diag; + } + + /* set flag to update link status after init */ + intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + + return 0; +} + +/* + * Configure device link speed and setup link. + * It returns 0 on success. + */ +static int +ixgbe_dev_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int err, link_up = 0, negotiate = 0; + uint32_t speed = 0; + + PMD_INIT_FUNC_TRACE(); + + /* IXGBE devices don't support half duplex */ + if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) && + (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) { + PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n", + dev->data->dev_conf.link_duplex, + dev->data->port_id); + return -EINVAL; + } + + /* stop adapter */ + hw->adapter_stopped = FALSE; + ixgbe_stop_adapter(hw); + + /* reinitialize adapter + * this calls reset and start */ + ixgbe_init_hw(hw); + + /* initialize transmission unit */ + ixgbe_dev_tx_init(dev); + + /* This can fail when allocating mbufs for descriptor rings */ + err = ixgbe_dev_rx_init(dev); + if (err) { + PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n"); + return err; + } + + ixgbe_dev_rxtx_start(dev); + + if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) { + err = hw->mac.ops.setup_sfp(hw); + if (err) + goto error; + } + + /* Turn on the laser */ + if (hw->phy.multispeed_fiber) + ixgbe_enable_tx_laser(hw); + + err = ixgbe_check_link(hw, &speed, &link_up, 0); + if (err) + goto error; + err = ixgbe_get_link_capabilities(hw, &speed, &negotiate); + if (err) + goto error; + + switch(dev->data->dev_conf.link_speed) { + case ETH_LINK_SPEED_AUTONEG: + speed = (hw->mac.type != ixgbe_mac_82598EB) ? + IXGBE_LINK_SPEED_82599_AUTONEG : + IXGBE_LINK_SPEED_82598_AUTONEG; + break; + case ETH_LINK_SPEED_100: + /* + * Invalid for 82598 but error will be detected by + * ixgbe_setup_link() + */ + speed = IXGBE_LINK_SPEED_100_FULL; + break; + case ETH_LINK_SPEED_1000: + speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + case ETH_LINK_SPEED_10000: + speed = IXGBE_LINK_SPEED_10GB_FULL; + break; + default: + PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n", + dev->data->dev_conf.link_speed, dev->data->port_id); + return -EINVAL; + } + + err = ixgbe_setup_link(hw, speed, negotiate, link_up); + if (err) + goto error; + + /* check if lsc interrupt is enabled */ + if (dev->data->dev_conf.intr_conf.lsc != 0) { + err = ixgbe_dev_interrupt_setup(dev); + if (err) + goto error; + } + + /* + * If VLAN filtering is enabled, set up VLAN tag offload and filtering + * and restore VFTA. 
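 * (For illustration only, a sketch of the caller side using the same config
 *  field tested just below: the application sets
 *      port_conf.rxmode.hw_vlan_filter = 1;
 *  before configuring the port, and then adds individual VLAN IDs through the
 *  vlan_filter_set op, i.e. ixgbe_vlan_filter_set() above. A VLAN ID such as
 *  1029 lands in shadow_vfta->vfta[32], bit 5, since 1029 >> 5 = 32 and
 *  1029 & 0x1F = 5. "port_conf" is an assumed variable name.)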
+ */ + if (dev->data->dev_conf.rxmode.hw_vlan_filter) + ixgbe_vlan_hw_support_enable(dev); + else + ixgbe_vlan_hw_support_disable(dev); + + if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) { + err = ixgbe_fdir_configure(dev); + if (err) + goto error; + } + + return (0); + +error: + PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err); + return -EIO; +} + +/* + * Stop device: disable rx and tx functions to allow for reconfiguring. + */ +static void +ixgbe_dev_stop(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + /* disable interrupts */ + ixgbe_disable_intr(hw); + + /* reset the NIC */ + ixgbe_reset_hw(hw); + hw->adapter_stopped = FALSE; + + /* stop adapter */ + ixgbe_stop_adapter(hw); + + /* Turn off the laser */ + if (hw->phy.multispeed_fiber) + ixgbe_disable_tx_laser(hw); + + ixgbe_dev_clear_queues(dev); + + /* Clear recorded link status */ + memset(&link, 0, sizeof(link)); + rte_ixgbe_dev_atomic_write_link_status(dev, &link); +} + +/* + * Reest and stop device. + */ +static void +ixgbe_dev_close(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_FUNC_TRACE(); + + ixgbe_reset_hw(hw); + + + ixgbe_dev_stop(dev); + hw->adapter_stopped = 1; + + ixgbe_disable_pcie_master(hw); + + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); +} + +/* + * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c + */ +static void +ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_stats *hw_stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + uint32_t bprc, lxon, lxoff, total; + uint64_t total_missed_rx, total_qbrc, total_qprc; + unsigned i; + + total_missed_rx = 0; + total_qbrc = 0; + total_qprc = 0; + + hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC); + hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC); + hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); + + for (i = 0; i < 8; i++) { + uint32_t mp; + mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + /* global total per queue */ + hw_stats->mpc[i] += mp; + /* Running comprehensive total for stats display */ + total_missed_rx += hw_stats->mpc[i]; + if (hw->mac.type == ixgbe_mac_82598EB) + hw_stats->rnbc[i] += + IXGBE_READ_REG(hw, IXGBE_RNBC(i)); + hw_stats->pxontxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); + hw_stats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); + hw_stats->pxofftxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); + hw_stats->pxoffrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + hw_stats->pxon2offc[i] += + IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i)); + } + for (i = 0; i < 16; i++) { + hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i)); + hw_stats->qbrc[i] += + ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32); + hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + hw_stats->qbtc[i] += + ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32); + hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); + + total_qprc += hw_stats->qprc[i]; + total_qbrc += hw_stats->qbrc[i]; + } + hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC); + hw_stats->mrfc += 
IXGBE_READ_REG(hw, IXGBE_MRFC); + hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC); + + /* Note that gprc counts missed packets */ + hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC); + + if (hw->mac.type != ixgbe_mac_82598EB) { + hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) + + ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32); + hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) + + ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32); + hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) + + ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32); + hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); + hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT); + } else { + hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); + hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); + /* 82598 only has a counter in the high register */ + hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH); + hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH); + hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + } + + /* + * Workaround: mprc hardware is incorrectly counting + * broadcasts, so for now we subtract those. + */ + bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); + hw_stats->bprc += bprc; + hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC); + if (hw->mac.type == ixgbe_mac_82598EB) + hw_stats->mprc -= bprc; + + hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64); + hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127); + hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255); + hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511); + hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023); + hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522); + + lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC); + hw_stats->lxontxc += lxon; + lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC); + hw_stats->lxofftxc += lxoff; + total = lxon + lxoff; + + hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC); + hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC); + hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64); + hw_stats->gptc -= total; + hw_stats->mptc -= total; + hw_stats->ptc64 -= total; + hw_stats->gotc -= total * ETHER_MIN_LEN; + + hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC); + hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC); + hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC); + hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC); + hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC); + hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC); + hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC); + hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR); + hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT); + hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127); + hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255); + hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511); + hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); + hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC); + hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); + hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); + /* Only read FCOE on 82599 */ + if (hw->mac.type != ixgbe_mac_82598EB) { + hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); + } + + if (stats == NULL) + return; + + /* Fill out the rte_eth_stats statistics structure */ + stats->ipackets = 
total_qprc; + stats->ibytes = total_qbrc; + stats->opackets = hw_stats->gptc; + stats->obytes = hw_stats->gotc; + stats->imcasts = hw_stats->mprc; + + /* Rx Errors */ + stats->ierrors = total_missed_rx + hw_stats->crcerrs + + hw_stats->rlec; + + stats->oerrors = 0; + + /* Flow Director Stats registers */ + hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + stats->fdirmatch = hw_stats->fdirmatch; + stats->fdirmiss = hw_stats->fdirmiss; +} + +static void +ixgbe_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct ixgbe_hw_stats *stats = + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* HW registers are cleared on read */ + ixgbe_dev_stats_get(dev, NULL); + + /* Reset software totals */ + memset(stats, 0, sizeof(*stats)); +} + +static void +ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* Good Rx packet, include VF loopback */ + UPDATE_VF_STAT(IXGBE_VFGPRC, + hw_stats->last_vfgprc, hw_stats->vfgprc); + + /* Good Rx octets, include VF loopback */ + UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, + hw_stats->last_vfgorc, hw_stats->vfgorc); + + /* Good Tx packet, include VF loopback */ + UPDATE_VF_STAT(IXGBE_VFGPTC, + hw_stats->last_vfgptc, hw_stats->vfgptc); + + /* Good Tx octets, include VF loopback */ + UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, + hw_stats->last_vfgotc, hw_stats->vfgotc); + + /* Rx Multicst Packet */ + UPDATE_VF_STAT(IXGBE_VFMPRC, + hw_stats->last_vfmprc, hw_stats->vfmprc); + + if (stats == NULL) + return; + + memset(stats, 0, sizeof(*stats)); + stats->ipackets = hw_stats->vfgprc; + stats->ibytes = hw_stats->vfgorc; + stats->opackets = hw_stats->vfgptc; + stats->obytes = hw_stats->vfgotc; + stats->imcasts = hw_stats->vfmprc; +} + +static void +ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) +{ + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); + + /* Sync HW register to the last stats */ + ixgbevf_dev_stats_get(dev, NULL); + + /* reset HW current stats*/ + hw_stats->vfgprc = 0; + hw_stats->vfgorc = 0; + hw_stats->vfgptc = 0; + hw_stats->vfgotc = 0; + hw_stats->vfmprc = 0; + +} + +static void +ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + dev_info->max_rx_queues = hw->mac.max_rx_queues; + dev_info->max_tx_queues = hw->mac.max_tx_queues; + dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL register */ + dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */ + dev_info->max_mac_addrs = hw->mac.num_rar_entries; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct rte_eth_link link, old; + ixgbe_link_speed link_speed; + int link_up; + int diag; + + link.link_status = 0; + link.link_speed = 0; + link.link_duplex = 0; + memset(&old, 0, sizeof(old)); + rte_ixgbe_dev_atomic_read_link_status(dev, &old); + + /* check if it needs to wait to complete, if lsc interrupt is enabled */ + if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0) + diag = 
ixgbe_check_link(hw, &link_speed, &link_up, 0); + else + diag = ixgbe_check_link(hw, &link_speed, &link_up, 1); + if (diag != 0) { + link.link_speed = ETH_LINK_SPEED_100; + link.link_duplex = ETH_LINK_HALF_DUPLEX; + rte_ixgbe_dev_atomic_write_link_status(dev, &link); + if (link.link_status == old.link_status) + return -1; + return 0; + } + + if (link_up == 0) { + rte_ixgbe_dev_atomic_write_link_status(dev, &link); + if (link.link_status == old.link_status) + return -1; + return 0; + } + link.link_status = 1; + link.link_duplex = ETH_LINK_FULL_DUPLEX; + + switch (link_speed) { + default: + case IXGBE_LINK_SPEED_UNKNOWN: + link.link_duplex = ETH_LINK_HALF_DUPLEX; + link.link_speed = ETH_LINK_SPEED_100; + break; + + case IXGBE_LINK_SPEED_100_FULL: + link.link_speed = ETH_LINK_SPEED_100; + break; + + case IXGBE_LINK_SPEED_1GB_FULL: + link.link_speed = ETH_LINK_SPEED_1000; + break; + + case IXGBE_LINK_SPEED_10GB_FULL: + link.link_speed = ETH_LINK_SPEED_10000; + break; + } + rte_ixgbe_dev_atomic_write_link_status(dev, &link); + + if (link.link_status == old.link_status) + return -1; + + return 0; +} + +static void +ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +} + +static void +ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= (~IXGBE_FCTRL_UPE); + if (dev->data->all_multicast == 1) + fctrl |= IXGBE_FCTRL_MPE; + else + fctrl &= (~IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +} + +static void +ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_MPE; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +} + +static void +ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fctrl; + + if (dev->data->promiscuous == 1) + return; /* must remain in all_multicast mode */ + + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl &= (~IXGBE_FCTRL_MPE); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); +} + +/** + * It clears the interrupt causes and enables the interrupt. + * It will be called once only during nic initialized. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + ixgbe_dev_link_status_print(dev); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC); + IXGBE_WRITE_FLUSH(hw); + rte_intr_enable(&(dev->pci_dev->intr_handle)); + + return 0; +} + +/* + * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. 
+ */ +static int +ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t eicr; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC); + IXGBE_WRITE_FLUSH(hw); + + /* read-on-clear nic registers here */ + eicr = IXGBE_READ_REG(hw, IXGBE_EICR); + PMD_INIT_LOG(INFO, "eicr %x", eicr); + if (eicr & IXGBE_EICR_LSC) { + /* set flag for async link update */ + intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + } + + return 0; +} + +/** + * It gets and then prints the link status. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static void +ixgbe_dev_link_status_print(struct rte_eth_dev *dev) +{ + struct rte_eth_link link; + + memset(&link, 0, sizeof(link)); + rte_ixgbe_dev_atomic_read_link_status(dev, &link); + if (link.link_status) { + PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s", + (int)(dev->data->port_id), + (unsigned)link.link_speed, + link.link_duplex == ETH_LINK_FULL_DUPLEX ? + "full-duplex" : "half-duplex"); + } else { + PMD_INIT_LOG(INFO, " Port %d: Link Down", + (int)(dev->data->port_id)); + } + PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d", + dev->pci_dev->addr.domain, + dev->pci_dev->addr.bus, + dev->pci_dev->addr.devid, + dev->pci_dev->addr.function); +} + +/* + * It executes link_update after knowing an interrupt occured. + * + * @param dev + * Pointer to struct rte_eth_dev. + * + * @return + * - On success, zero. + * - On failure, a negative value. + */ +static int +ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) +{ + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) { + return -1; + } + ixgbe_dev_link_update(dev, 0); + + return 0; +} + +/** + * Interrupt handler which shall be registered for alarm callback for delayed + * handling specific interrupt to wait for the stable nic state. As the + * NIC interrupt state is not stable for ixgbe after link is just down, + * it needs to wait 4 seconds to get the stable status. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. + * + * @return + * void + */ +static void +ixgbe_dev_interrupt_delayed_handler(void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + struct ixgbe_hw *hw = + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + IXGBE_READ_REG(hw, IXGBE_EICR); + ixgbe_dev_interrupt_action(dev); + if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { + intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + rte_intr_enable(&(dev->pci_dev->intr_handle)); + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC); + IXGBE_WRITE_FLUSH(hw); + ixgbe_dev_link_status_print(dev); + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC); + } +} + +/** + * Interrupt handler triggered by NIC for handling + * specific interrupt. + * + * @param handle + * Pointer to interrupt handle. + * @param param + * The address of parameter (struct rte_eth_dev *) regsitered before. 
+ * + * @return + * void + */ +static void +ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param) +{ + int64_t timeout; + struct rte_eth_link link; + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + /* get the link status before link update, for predicting later */ + memset(&link, 0, sizeof(link)); + rte_ixgbe_dev_atomic_read_link_status(dev, &link); + ixgbe_dev_interrupt_get_status(dev); + ixgbe_dev_interrupt_action(dev); + + if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) + return; + + /* likely to up */ + if (!link.link_status) + /* handle it 1 sec later, wait it being stable */ + timeout = IXGBE_LINK_UP_CHECK_TIMEOUT; + /* likely to down */ + else + /* handle it 4 sec later, wait it being stable */ + timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT; + + ixgbe_dev_link_status_print(dev); + if (rte_eal_alarm_set(timeout * 1000, + ixgbe_dev_interrupt_delayed_handler, param) < 0) + PMD_INIT_LOG(ERR, "Error setting alarm"); +} + +static int +ixgbe_dev_led_on(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP); +} + +static int +ixgbe_dev_led_off(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP); +} + +static int +ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) +{ + struct ixgbe_hw *hw; + int err; + uint32_t rx_buf_size; + uint32_t max_high_water; + enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { + ixgbe_fc_none, + ixgbe_fc_rx_pause, + ixgbe_fc_tx_pause, + ixgbe_fc_full + }; + + PMD_INIT_FUNC_TRACE(); + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)); + PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size); + + /* + * At least reserve one Ethernet frame for watermark + * high_water/low_water in kilo bytes for ixgbe + */ + max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT; + if ((fc_conf->high_water > max_high_water) || + (fc_conf->high_water < fc_conf->low_water)) { + PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n"); + PMD_INIT_LOG(ERR, "High_water must <= 0x%x\n", max_high_water); + return (-EINVAL); + } + + hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode]; + hw->fc.pause_time = fc_conf->pause_time; + hw->fc.high_water[0] = fc_conf->high_water; + hw->fc.low_water = fc_conf->low_water; + hw->fc.send_xon = fc_conf->send_xon; + + err = ixgbe_fc_enable(hw, 0); + /* Not negotiated is not an error case */ + if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) { + return 0; + } + + PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err); + return -EIO; +} + +static void +ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint32_t index, uint32_t pool) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t enable_addr = 1; + + ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr); +} + +static void +ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + ixgbe_clear_rar(hw, index); +} + +/* + * Virtual Function operations + */ +static void +ixgbevf_intr_disable(struct ixgbe_hw *hw) +{ + PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable"); + + /* Clear 
interrupt mask to stop from interrupts being generated */ + IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK); + + IXGBE_WRITE_FLUSH(hw); +} + +static int +ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q) +{ + int diag; + struct rte_eth_conf* conf = &dev->data->dev_conf; + + PMD_INIT_FUNC_TRACE(); + + /* Allocate the array of pointers to RX queue structures */ + diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q); + if (diag != 0) { + PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d" + "pointers to RX queues failed", dev->data->port_id, + nb_rx_q); + return diag; + } + + /* Allocate the array of pointers to TX queue structures */ + diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q); + if (diag != 0) { + PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d" + "pointers to TX queues failed", dev->data->port_id, + nb_tx_q); + return diag; + } + + if (!conf->rxmode.hw_strip_crc) { + /* + * VF has no ability to enable/disable HW CRC + * Keep the persistent behavior the same as Host PF + */ + PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n"); + conf->rxmode.hw_strip_crc = 1; + } + + return 0; +} + +static int +ixgbevf_dev_start(struct rte_eth_dev *dev) +{ + int err = 0; + PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start"); + + ixgbevf_dev_tx_init(dev); + err = ixgbevf_dev_rx_init(dev); + if(err){ + ixgbe_dev_clear_queues(dev); + PMD_INIT_LOG(ERR,"Unable to initialize RX hardware\n"); + return err; + } + ixgbevf_dev_rxtx_start(dev); + + return 0; +} + +static void +ixgbevf_dev_stop(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop"); + + ixgbe_reset_hw(hw); + hw->adapter_stopped = 0; + ixgbe_stop_adapter(hw); + /* reprogram the RAR[0] in case user changed it. */ + ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); +} diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h new file mode 100644 index 0000000000..1df3a886a0 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h @@ -0,0 +1,176 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _IXGBE_ETHDEV_H_ +#define _IXGBE_ETHDEV_H_ + +/* need update link, bit flag */ +#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0) + +/* + * Defines that were not part of ixgbe_type.h as they are not used by the + * FreeBSD driver. + */ +#define IXGBE_ADVTXD_MAC_1588 0x00080000 /* IEEE1588 Timestamp packet */ +#define IXGBE_RXD_STAT_TMST 0x10000 /* Timestamped Packet indication */ +#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* L4 Packet TYPE, resvd */ +#define IXGBE_RXDADV_ERR_CKSUM_BIT 30 +#define IXGBE_RXDADV_ERR_CKSUM_MSK 3 +#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Bit shift for l2_len */ + +#define IXGBE_VFTA_SIZE 128 + +/* + * Information about the fdir mode. + */ +struct ixgbe_hw_fdir_info { + uint16_t collision; + uint16_t free; + uint16_t maxhash; + uint8_t maxlen; + uint64_t add; + uint64_t remove; + uint64_t f_add; + uint64_t f_remove; +}; + +/* structure for interrupt relative data */ +struct ixgbe_interrupt { + uint32_t flags; +}; + +/* local VFTA copy */ +struct ixgbe_vfta { + uint32_t vfta[IXGBE_VFTA_SIZE]; +}; + +/* + * Structure to store private data for each driver instance (for each port). + */ +struct ixgbe_adapter { + struct ixgbe_hw hw; + struct ixgbe_hw_stats stats; + struct ixgbe_hw_fdir_info fdir; + struct ixgbe_interrupt intr; + struct ixgbe_vfta shadow_vfta; +}; + +#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\ + (&((struct ixgbe_adapter *)adapter)->hw) + +#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \ + (&((struct ixgbe_adapter *)adapter)->stats) + +#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \ + (&((struct ixgbe_adapter *)adapter)->intr) + +#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \ + (&((struct ixgbe_adapter *)adapter)->fdir) + +#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \ + (&((struct ixgbe_adapter *)adapter)->shadow_vfta) + + +/* + * RX/TX function prototypes + */ +void ixgbe_dev_clear_queues(struct rte_eth_dev *dev); + +int ixgbe_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_rx_queues); + +int ixgbe_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_tx_queues); + +int ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mb_pool); + +int ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, + uint16_t nb_tx_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +int ixgbe_dev_rx_init(struct rte_eth_dev *dev); + +void ixgbe_dev_tx_init(struct rte_eth_dev *dev); + +void ixgbe_dev_rxtx_start(struct rte_eth_dev *dev); + +int ixgbevf_dev_rx_init(struct rte_eth_dev *dev); + +void ixgbevf_dev_tx_init(struct rte_eth_dev *dev); + +void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev); + +uint16_t ixgbe_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t ixgbe_recv_scattered_pkts(struct igb_rx_queue *rxq, + struct rte_mbuf **rx_pkts, uint16_t nb_pkts); + +uint16_t 
ixgbe_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +/* + * Flow director function prototypes + */ +int ixgbe_fdir_configure(struct rte_eth_dev *dev); + +int ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint8_t queue); + +int ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint8_t queue); + +int ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter); + +void ixgbe_fdir_info_get(struct rte_eth_dev *dev, + struct rte_eth_fdir *fdir); + +int ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint16_t soft_id, + uint8_t queue, uint8_t drop); + +int ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter,uint16_t soft_id, + uint8_t queue, uint8_t drop); + +int ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint16_t soft_id); + +int ixgbe_fdir_set_masks(struct rte_eth_dev *dev, + struct rte_fdir_masks *fdir_masks); + +#endif /* _IXGBE_ETHDEV_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe_fdir.c b/lib/librte_pmd_ixgbe/ixgbe_fdir.c new file mode 100644 index 0000000000..1ebc4165e0 --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe_fdir.c @@ -0,0 +1,891 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ixgbe_logs.h" +#include "ixgbe/ixgbe_api.h" +#include "ixgbe/ixgbe_common.h" +#include "ixgbe_ethdev.h" + +/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */ +#define FDIRCTRL_PBALLOC_MASK 0x03 + +/* For calculating memory required for FDIR filters */ +#define PBALLOC_SIZE_SHIFT 15 + +/* Number of bits used to mask bucket hash for different pballoc sizes */ +#define PERFECT_BUCKET_64KB_HASH_MASK 0x07FF /* 11 bits */ +#define PERFECT_BUCKET_128KB_HASH_MASK 0x0FFF /* 12 bits */ +#define PERFECT_BUCKET_256KB_HASH_MASK 0x1FFF /* 13 bits */ +#define SIG_BUCKET_64KB_HASH_MASK 0x1FFF /* 13 bits */ +#define SIG_BUCKET_128KB_HASH_MASK 0x3FFF /* 14 bits */ +#define SIG_BUCKET_256KB_HASH_MASK 0x7FFF /* 15 bits */ + +/** + * This function is based on ixgbe_fdir_enable_82599() in ixgbe/ixgbe_82599.c. + * It adds extra configuration of fdirctrl that is common for all filter types. + * + * Initialize Flow Director control registers + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +static void fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) +{ + int i; + + PMD_INIT_FUNC_TRACE(); + + /* Prime the keys for hashing */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); + + /* + * Continue setup of fdirctrl register bits: + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + + /* + * Poll init-done after we write the register. Estimated times: + * 10G: PBALLOC = 11b, timing is 60us + * 1G: PBALLOC = 11b, timing is 600us + * 100M: PBALLOC = 11b, timing is 6ms + * + * Multiple these timings by 4 if under full Rx load + * + * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for + * 1 msec per poll time. If we're at line rate and drop to 100M, then + * this might not finish in our poll time, but we can live with that + * for now. + */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); + IXGBE_WRITE_FLUSH(hw); + for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; + msec_delay(1); + } + + if (i >= IXGBE_FDIR_INIT_DONE_POLL) + PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!\n"); +} + +/* + * Set appropriate bits in fdirctrl for: variable reporting levels, moving + * flexbytes matching field, and drop queue (only for perfect matching mode). 
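 * (Worked example, a sketch derived from the switch statements below: a
 *  signature-mode configuration with pballoc = RTE_FDIR_PBALLOC_64K,
 *  status = RTE_FDIR_REPORT_STATUS and flexbytes_offset = 0x6 would produce
 *      fdirctrl = IXGBE_FDIRCTRL_PBALLOC_64K | IXGBE_FDIRCTRL_REPORT_STATUS |
 *                 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 *  perfect-match mode would additionally OR in IXGBE_FDIRCTRL_PERFECT_MATCH
 *  and the drop queue shifted by IXGBE_FDIRCTRL_DROP_Q_SHIFT.)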
+ */ +static int +configure_fdir_flags(struct rte_fdir_conf *conf, uint32_t *fdirctrl) +{ + *fdirctrl = 0; + + switch (conf->pballoc) { + case RTE_FDIR_PBALLOC_64K: + /* 8k - 1 signature filters */ + *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; + break; + case RTE_FDIR_PBALLOC_128K: + /* 16k - 1 signature filters */ + *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; + break; + case RTE_FDIR_PBALLOC_256K: + /* 32k - 1 signature filters */ + *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; + break; + default: + /* bad value */ + PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value"); + return -EINVAL; + }; + + /* status flags: write hash & swindex in the rx descriptor */ + switch (conf->status) { + case RTE_FDIR_NO_REPORT_STATUS: + /* do nothing, default mode */ + break; + case RTE_FDIR_REPORT_STATUS: + /* report status when the packet matches a fdir rule */ + *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; + break; + case RTE_FDIR_REPORT_STATUS_ALWAYS: + /* always report status */ + *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS; + break; + default: + /* bad value */ + PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value"); + return -EINVAL; + }; + + *fdirctrl |= (conf->flexbytes_offset << IXGBE_FDIRCTRL_FLEX_SHIFT); + + if (conf->mode == RTE_FDIR_MODE_PERFECT) { + *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; + *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); + } + + return 0; +} + +int +ixgbe_fdir_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + int err; + uint32_t fdirctrl, pbsize; + int i; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type != ixgbe_mac_82599EB) + return -ENOSYS; + + err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl); + if (err) + return err; + + /* + * Before enabling Flow Director, the Rx Packet Buffer size + * must be reduced. The new value is the current size minus + * flow director memory usage size. + */ + pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK))); + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), + (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); + + /* + * The defaults in the HW for RX PB 1-7 are not zero and so should be + * intialized to zero for non DCB mode otherwise actual total RX PB + * would be bigger than programmed and filter space would run into + * the PB 0 region. + */ + for (i = 1; i < 8; i++) + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); + + fdir_enable_82599(hw, fdirctrl); + return 0; +} + +/* + * The below function is taken from the FreeBSD IXGBE drivers release + * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK + * before returning, as the signature hash can use 16bits. + * + * The newer driver has optimised functions for calculating bucket and + * signature hashes. However they don't support IPv6 type packets for signature + * filters so are not used here. + * + * Note that the bkt_hash field in the ixgbe_atr_input structure is also never + * set. 
+ * + * Compute the hashes for SW ATR + * @stream: input bitstream to compute the hash on + * @key: 32-bit hash key + **/ +static u32 +ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, + u32 key) +{ + /* + * The algorithm is as follows: + * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 + * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] + * and A[n] x B[n] is bitwise AND between same length strings + * + * K[n] is 16 bits, defined as: + * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] + * for n modulo 32 < 15, K[n] = + * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] + * + * S[n] is 16 bits, defined as: + * for n >= 15, S[n] = S[n:n - 15] + * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] + * + * To simplify for programming, the algorithm is implemented + * in software this way: + * + * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0] + * + * for (i = 0; i < 352; i+=32) + * hi_hash_dword[31:0] ^= Stream[(i+31):i]; + * + * lo_hash_dword[15:0] ^= Stream[15:0]; + * lo_hash_dword[15:0] ^= hi_hash_dword[31:16]; + * lo_hash_dword[31:16] ^= hi_hash_dword[15:0]; + * + * hi_hash_dword[31:0] ^= Stream[351:320]; + * + * if(key[0]) + * hash[15:0] ^= Stream[15:0]; + * + * for (i = 0; i < 16; i++) { + * if (key[i]) + * hash[15:0] ^= lo_hash_dword[(i+15):i]; + * if (key[i + 16]) + * hash[15:0] ^= hi_hash_dword[(i+15):i]; + * } + * + */ + __be32 common_hash_dword = 0; + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 hash_result = 0; + u8 i; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 10; i; i -= 2) + common_hash_dword ^= atr_input->dword_stream[i] ^ + atr_input->dword_stream[i - 1]; + + hi_hash_dword = IXGBE_NTOHL(common_hash_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + if (key & 0x0001) hash_result ^= lo_hash_dword; + if (key & 0x00010000) hash_result ^= hi_hash_dword; + + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the vlan until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + + /* process the remaining 30 bits in the key 2 bits at a time */ + for (i = 15; i; i-- ) { + if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; + if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i; + } + + return hash_result; +} + +/* + * Calculate the hash value needed for signature-match filters. In the FreeBSD + * driver, this is done by the optimised function + * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it + * doesn't support calculating a hash for an IPv6 filter. 
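 * (Shape of the result, as used by the add/remove helpers below: the bucket
 *  hash, masked down to 13/14/15 bits depending on pballoc, occupies the low
 *  half of the returned value, and the signature hash is shifted up by
 *  IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT into the high half, matching the layout
 *  written to the FDIRHASH register.)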
+ */ +static uint32_t +atr_compute_sig_hash_82599(union ixgbe_atr_input *input, + enum rte_fdir_pballoc_type pballoc) +{ + uint32_t bucket_hash, sig_hash; + + if (pballoc == RTE_FDIR_PBALLOC_256K) + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + SIG_BUCKET_256KB_HASH_MASK; + else if (pballoc == RTE_FDIR_PBALLOC_128K) + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + SIG_BUCKET_128KB_HASH_MASK; + else + bucket_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_BUCKET_HASH_KEY) & + SIG_BUCKET_64KB_HASH_MASK; + + sig_hash = ixgbe_atr_compute_hash_82599(input, + IXGBE_ATR_SIGNATURE_HASH_KEY); + + return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash; +} + +/** + * This function is based on ixgbe_atr_add_signature_filter_82599() in + * ixgbe/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports + * setting extra fields in the FDIRCMD register, and removes the code that was + * verifying the flow_type field. According to the documentation, a flow type of + * 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to + * work ok... + * + * Adds a signature hash filter + * @hw: pointer to hardware structure + * @input: unique input dword + * @queue: queue index to direct traffic to + * @fdircmd: any extra flags to set in fdircmd register + * @fdirhash: pre-calculated hash value for the filter + **/ +static void +fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, u8 queue, u32 fdircmd, + u32 fdirhash) +{ + u64 fdirhashcmd; + + PMD_INIT_FUNC_TRACE(); + + /* configure FDIRCMD register */ + fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + + /* + * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits + * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. + */ + fdirhashcmd = (u64)fdircmd << 32; + fdirhashcmd |= fdirhash; + IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + + PMD_INIT_LOG(DEBUG, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); +} + +/* + * Convert DPDK rte_fdir_filter struct to ixgbe_atr_input union that is used + * by the IXGBE driver code. 
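 * (A sketch of a typical caller-side filter, using the field names consumed
 *  below; the numeric values are made up:
 *      struct rte_fdir_filter f;
 *      memset(&f, 0, sizeof(f));
 *      f.l4type = RTE_FDIR_L4TYPE_UDP;
 *      f.ip_src.ipv4_addr = 0x0A000001;
 *      f.ip_dst.ipv4_addr = 0x0A000002;
 *      f.port_src = 5000;
 *      f.port_dst = 5001;
 *  This maps to formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4, with the
 *  addresses and ports copied through unchanged.)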
+ */ +static int +fdir_filter_to_atr_input(struct rte_fdir_filter *fdir_filter, + union ixgbe_atr_input *input) +{ + if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP || + fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) && + (fdir_filter->port_src || fdir_filter->port_dst)) { + PMD_INIT_LOG(ERR, "Invalid fdir_filter"); + return -EINVAL; + } + + memset(input, 0, sizeof(*input)); + + input->formatted.vlan_id = fdir_filter->vlan_id; + input->formatted.src_port = fdir_filter->port_src; + input->formatted.dst_port = fdir_filter->port_dst; + input->formatted.flex_bytes = fdir_filter->flex_bytes; + + switch (fdir_filter->l4type) { + case RTE_FDIR_L4TYPE_TCP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case RTE_FDIR_L4TYPE_UDP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case RTE_FDIR_L4TYPE_SCTP: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case RTE_FDIR_L4TYPE_NONE: + input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; + break; + default: + PMD_INIT_LOG(ERR, " Error on l4type input"); + return -EINVAL; + } + + if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) { + input->formatted.flow_type |= IXGBE_ATR_L4TYPE_IPV6_MASK; + + input->formatted.src_ip[0] = fdir_filter->ip_src.ipv6_addr[0]; + input->formatted.src_ip[1] = fdir_filter->ip_src.ipv6_addr[1]; + input->formatted.src_ip[2] = fdir_filter->ip_src.ipv6_addr[2]; + input->formatted.src_ip[3] = fdir_filter->ip_src.ipv6_addr[3]; + + input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv6_addr[0]; + input->formatted.dst_ip[1] = fdir_filter->ip_dst.ipv6_addr[1]; + input->formatted.dst_ip[2] = fdir_filter->ip_dst.ipv6_addr[2]; + input->formatted.dst_ip[3] = fdir_filter->ip_dst.ipv6_addr[3]; + + } else { + input->formatted.src_ip[0] = fdir_filter->ip_src.ipv4_addr; + input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv4_addr; + } + + return 0; +} + +/* + * Adds or updates a signature filter. + * + * dev: ethernet device to add filter to + * fdir_filter: filter details + * queue: queue index to direct traffic to + * update: 0 to add a new filter, otherwise update existing. + */ +static int +fdir_add_update_signature_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint8_t queue, int update) +{ + struct ixgbe_hw *hw= IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0; + uint32_t fdirhash; + union ixgbe_atr_input input; + int err; + + if (hw->mac.type != ixgbe_mac_82599EB) + return -ENOSYS; + + err = fdir_filter_to_atr_input(fdir_filter, &input); + if (err) + return err; + + fdirhash = atr_compute_sig_hash_82599(&input, + dev->data->dev_conf.fdir_conf.pballoc); + fdir_add_signature_filter_82599(hw, &input, queue, fdircmd_flags, + fdirhash); + return 0; +} + +int +ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint8_t queue) +{ + PMD_INIT_FUNC_TRACE(); + return fdir_add_update_signature_filter(dev, fdir_filter, queue, 0); +} + +int +ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint8_t queue) +{ + PMD_INIT_FUNC_TRACE(); + return fdir_add_update_signature_filter(dev, fdir_filter, queue, 1); +} + +/* + * This is based on ixgbe_fdir_erase_perfect_filter_82599() in + * ixgbe/ixgbe_82599.c. It is modified to take in the hash as a parameter so + * that it can be used for removing signature and perfect filters. 
+ */ +static s32 +fdir_erase_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, + uint32_t fdirhash) +{ + u32 fdircmd = 0; + u32 retry_count; + s32 err = 0; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* flush hash to HW */ + IXGBE_WRITE_FLUSH(hw); + + /* Query if filter is present */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + + for (retry_count = 10; retry_count; retry_count--) { + /* allow 10us for query to process */ + usec_delay(10); + /* verify query completed successfully */ + fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); + if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) + break; + } + + if (!retry_count) { + PMD_INIT_LOG(ERR, "Timeout querying for flow director filter"); + err = -EIO; + } + + /* if filter exists in hardware then remove it */ + if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + IXGBE_WRITE_FLUSH(hw); + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, + IXGBE_FDIRCMD_CMD_REMOVE_FLOW); + } + + return err; +} + +int +ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + union ixgbe_atr_input input; + int err; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type != ixgbe_mac_82599EB) + return -ENOSYS; + + err = fdir_filter_to_atr_input(fdir_filter, &input); + if (err) + return err; + + return fdir_erase_filter_82599(hw, &input, + atr_compute_sig_hash_82599(&input, + dev->data->dev_conf.fdir_conf.pballoc)); +} + +/** + * This is based on ixgbe_get_fdirtcpm_82599(), in ixgbe/ixgbe_82599.c. It no + * longer does the byte reordering + * + * generate a tcp port from atr_input_masks + * @input_mask: mask to be bit swapped + * + * The source and destination port masks for flow director are bit swapped + * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc. In order to + * generate a correctly swapped value we need to bit swap the mask and that + * is what is accomplished by this function. + **/ +static uint32_t +get_fdirtcpm_82599(struct rte_fdir_masks *input_mask) +{ + u32 mask = input_mask->dst_port_mask; + mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; + mask |= input_mask->src_port_mask; + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); + return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8); +} + +/* + * This macro exists in ixgbe/ixgbe_82599.c, however in that file it reverses + * the bytes, and then reverses them again. So here it does nothing. + */ +#define IXGBE_WRITE_REG_BE32 IXGBE_WRITE_REG + +/* + * This is based on ixgbe_fdir_set_input_mask_82599() in ixgbe/ixgbe_82599.c, + * but makes use of the rte_fdir_masks structure to see which bits to set. + */ +static int +fdir_set_input_mask_82599(struct ixgbe_hw *hw, + struct rte_fdir_masks *input_mask) +{ + /* mask VM pool and IPv6 since it is currently not supported */ + u32 fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6; + u32 fdirtcpm; + + PMD_INIT_FUNC_TRACE(); + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + * + * This also assumes IPv4 only. IPv6 masking isn't supported at this + * point in time. 
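+ *
+ * As an illustration (values arbitrary), a caller that wants to match on the
+ * full IPv4 source/destination addresses and both L4 ports, while ignoring
+ * VLAN and the flexible bytes, would pass roughly:
+ *
+ *   struct rte_fdir_masks m;
+ *   memset(&m, 0, sizeof(m));
+ *   m.src_ipv4_mask = 0xFFFFFFFF;
+ *   m.dst_ipv4_mask = 0xFFFFFFFF;
+ *   m.src_port_mask = 0xFFFF;
+ *   m.dst_port_mask = 0xFFFF;
+ *
+ * With vlan_id, vlan_prio and flexbytes left at zero, the code below then
+ * writes fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_VLANID |
+ * IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX.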
+ */
+ if (input_mask->only_ip_flow) {
+ /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+ fdirm |= IXGBE_FDIRM_L4P;
+ if (input_mask->dst_port_mask || input_mask->src_port_mask) {
+ PMD_INIT_LOG(ERR, " Error on src/dst port mask\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!input_mask->vlan_id)
+ /* mask VLAN ID */
+ fdirm |= IXGBE_FDIRM_VLANID;
+
+ if (!input_mask->vlan_prio)
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+
+ if (!input_mask->flexbytes)
+ /* Mask Flex Bytes */
+ fdirm |= IXGBE_FDIRM_FLEX;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = get_fdirtcpm_82599(input_mask);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+ /* store source and destination IP masks (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+ IXGBE_NTOHL(~input_mask->src_ipv4_mask));
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+ IXGBE_NTOHL(~input_mask->dst_ipv4_mask));
+
+ return IXGBE_SUCCESS;
+}
+
+int
+ixgbe_fdir_set_masks(struct rte_eth_dev *dev, struct rte_fdir_masks *fdir_masks)
+{
+ struct ixgbe_hw *hw;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ err = ixgbe_reinit_fdir_tables_82599(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "reinit of fdir tables failed");
+ return -EIO;
+ }
+
+ return fdir_set_input_mask_82599(hw, fdir_masks);
+}
+
+static uint32_t
+atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ enum rte_fdir_pballoc_type pballoc)
+{
+ if (pballoc == RTE_FDIR_PBALLOC_256K)
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_256KB_HASH_MASK;
+ else if (pballoc == RTE_FDIR_PBALLOC_128K)
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_128KB_HASH_MASK;
+ else
+ return ixgbe_atr_compute_hash_82599(input,
+ IXGBE_ATR_BUCKET_HASH_KEY) &
+ PERFECT_BUCKET_64KB_HASH_MASK;
+}
+
+/*
+ * This is based on ixgbe_fdir_write_perfect_filter_82599() in
+ * ixgbe/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
+ * added, and IPv6 support also added. The hash value is also pre-calculated
+ * as the pballoc value is needed to do it.
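+ *
+ * For perfect filters the FDIRHASH value written below carries the bucket
+ * hash in its low bits and the caller-supplied soft_id in the signature /
+ * software-index field, and the same value is rebuilt on removal so the
+ * hardware can locate the entry again. Illustratively (assuming that field
+ * starts at bit 16, as IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT suggests), a bucket
+ * hash of 0x123 with soft_id = 7 is programmed as (7 << 16) | 0x123 =
+ * 0x00070123.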
+ */ +static void +fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input, + uint16_t soft_id, uint8_t queue, uint32_t fdircmd, + uint32_t fdirhash) +{ + u32 fdirport, fdirvlan; + + /* record the source address (big-endian) */ + if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) { + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), input->formatted.src_ip[0]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), input->formatted.src_ip[1]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.src_ip[2]); + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[3]); + } + else { + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); + } + + /* record the first 32 bits of the destination address (big-endian) */ + IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); + + /* record source and destination port (little-endian)*/ + fdirport = IXGBE_NTOHS(input->formatted.dst_port); + fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; + fdirport |= IXGBE_NTOHS(input->formatted.src_port); + IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); + + /* record vlan (little-endian) and flex_bytes(big-endian) */ + fdirvlan = input->formatted.flex_bytes; + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; + fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); + IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + + /* configure FDIRHASH register */ + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); + + /* + * flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + IXGBE_WRITE_FLUSH(hw); + + /* configure FDIRCMD register */ + fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); +} + +/* + * Adds or updates a perfect filter. + * + * dev: ethernet device to add filter to + * fdir_filter: filter details + * soft_id: software index for the filters + * queue: queue index to direct traffic to + * drop: non-zero if packets should be sent to the drop queue + * update: 0 to add a new filter, otherwise update existing. + */ +static int +fdir_add_update_perfect_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint16_t soft_id, + uint8_t queue, int drop, int update) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + uint32_t fdircmd_flags = (update) ? 
IXGBE_FDIRCMD_FILTER_UPDATE : 0; + uint32_t fdirhash; + union ixgbe_atr_input input; + int err; + + if (hw->mac.type != ixgbe_mac_82599EB) + return -ENOSYS; + + err = fdir_filter_to_atr_input(fdir_filter, &input); + if (err) + return err; + + if (drop) { + queue = dev->data->dev_conf.fdir_conf.drop_queue; + fdircmd_flags |= IXGBE_FDIRCMD_DROP; + } + + fdirhash = atr_compute_perfect_hash_82599(&input, + dev->data->dev_conf.fdir_conf.pballoc); + + fdir_write_perfect_filter_82599(hw, &input, soft_id, queue, + fdircmd_flags, fdirhash); + return 0; +} + +int +ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint16_t soft_id, + uint8_t queue, uint8_t drop) +{ + PMD_INIT_FUNC_TRACE(); + return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue, + drop, 0); +} + +int +ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, uint16_t soft_id, + uint8_t queue, uint8_t drop) +{ + PMD_INIT_FUNC_TRACE(); + return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue, + drop, 1); +} + +int +ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev, + struct rte_fdir_filter *fdir_filter, + uint16_t soft_id) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + union ixgbe_atr_input input; + uint32_t fdirhash; + int err; + + PMD_INIT_FUNC_TRACE(); + + if (hw->mac.type != ixgbe_mac_82599EB) + return -ENOSYS; + + err = fdir_filter_to_atr_input(fdir_filter, &input); + if (err) + return err; + + /* configure FDIRHASH register */ + fdirhash = atr_compute_perfect_hash_82599(&input, + dev->data->dev_conf.fdir_conf.pballoc); + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + + return fdir_erase_filter_82599(hw, &input, fdirhash); +} + +void +ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir *fdir) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_hw_fdir_info *info = + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + uint32_t reg; + + if (hw->mac.type != ixgbe_mac_82599EB) + return; + + /* Get the information from registers */ + reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE); + info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >> + IXGBE_FDIRFREE_COLL_SHIFT); + info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >> + IXGBE_FDIRFREE_FREE_SHIFT); + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >> + IXGBE_FDIRLEN_MAXHASH_SHIFT); + info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >> + IXGBE_FDIRLEN_MAXLEN_SHIFT); + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); + info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >> + IXGBE_FDIRUSTAT_REMOVE_SHIFT; + info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >> + IXGBE_FDIRUSTAT_ADD_SHIFT; + + reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF; + info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >> + IXGBE_FDIRFSTAT_FREMOVE_SHIFT; + info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >> + IXGBE_FDIRFSTAT_FADD_SHIFT; + + /* Copy the new information in the fdir parameter */ + fdir->collision = info->collision; + fdir->free = info->free; + fdir->maxhash = info->maxhash; + fdir->maxlen = info->maxlen; + fdir->remove = info->remove; + fdir->add = info->add; + fdir->f_remove = info->f_remove; + fdir->f_add = info->f_add; +} diff --git a/lib/librte_pmd_ixgbe/ixgbe_logs.h b/lib/librte_pmd_ixgbe/ixgbe_logs.h new file mode 100644 index 0000000000..e8929cca66 --- /dev/null +++ 
b/lib/librte_pmd_ixgbe/ixgbe_logs.h @@ -0,0 +1,76 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _IXGBE_LOGS_H_ +#define _IXGBE_LOGS_H_ + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") +#else +#define PMD_INIT_LOG(level, fmt, args...) do { } while(0) +#define PMD_INIT_FUNC_TRACE() do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE +#define PMD_TX_FREE_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0) +#endif + +#ifdef RTE_LIBRTE_IXGBE_DEBUG_DRIVER +#define PMD_DRV_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#else +#define PMD_DRV_LOG(level, fmt, args...) do { } while(0) +#endif + +#endif /* _IXGBE_LOGS_H_ */ diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c new file mode 100644 index 0000000000..aa698a390f --- /dev/null +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c @@ -0,0 +1,2445 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ixgbe_logs.h" +#include "ixgbe/ixgbe_api.h" +#include "ixgbe/ixgbe_vf.h" +#include "ixgbe_ethdev.h" + +static inline struct rte_mbuf * +rte_rxmbuf_alloc(struct rte_mempool *mp) +{ + struct rte_mbuf *m; + + m = __rte_mbuf_raw_alloc(mp); + __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0); + return (m); +} + +#define RTE_MBUF_DATA_DMA_ADDR(mb) \ + (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \ + (char *)(mb)->buf_addr)) + +#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \ + (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM) + +/** + * Structure associated with each descriptor of the RX ring of a RX queue. + */ +struct igb_rx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */ +}; + +/** + * Structure associated with each descriptor of the TX ring of a TX queue. + */ +struct igb_tx_entry { + struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */ + uint16_t next_id; /**< Index of next descriptor in ring. */ + uint16_t last_id; /**< Index of last scattered descriptor. */ +}; + +/** + * Structure associated with each RX queue. + */ +struct igb_rx_queue { + struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */ + volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */ + uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */ + volatile uint32_t *rdt_reg_addr; /**< RDT register address. */ + struct igb_rx_entry *sw_ring; /**< address of RX software ring. */ + struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. 
*/ + struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */ + uint16_t nb_rx_desc; /**< number of RX descriptors. */ + uint16_t rx_tail; /**< current value of RDT register. */ + uint16_t nb_rx_hold; /**< number of held free RX desc. */ + uint16_t rx_free_thresh; /**< max free RX desc to hold. */ + uint16_t queue_id; /**< RX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ +}; + +/** + * IXGBE CTX Constants + */ +enum ixgbe_advctx_num { + IXGBE_CTX_0 = 0, /**< CTX0 */ + IXGBE_CTX_1 = 1, /**< CTX1 */ + IXGBE_CTX_NUM = 2, /**< CTX NUMBER */ +}; + +/** + * Structure to check if new context need be built + */ +struct ixgbe_advctx_info { + uint16_t flags; /**< ol_flags for context build. */ + uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */ + uint32_t vlan_macip_lens; /**< vlan, mac ip length. */ +}; + +/** + * Structure associated with each TX queue. + */ +struct igb_tx_queue { + /** TX ring virtual address. */ + volatile union ixgbe_adv_tx_desc *tx_ring; + uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */ + struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */ + volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */ + uint16_t nb_tx_desc; /**< number of TX descriptors. */ + uint16_t tx_tail; /**< current value of TDT reg. */ + uint16_t tx_free_thresh;/**< minimum TX before freeing. */ + /** Number of TX descriptors to use before RS bit is set. */ + uint16_t tx_rs_thresh; + /** Number of TX descriptors used since RS bit was set. */ + uint16_t nb_tx_used; + /** Index to last TX descriptor to have been cleaned. */ + uint16_t last_desc_cleaned; + /** Total number of TX descriptors ready to be allocated. */ + uint16_t nb_tx_free; + uint16_t queue_id; /**< TX queue index. */ + uint8_t port_id; /**< Device port identifier. */ + uint8_t pthresh; /**< Prefetch threshold register. */ + uint8_t hthresh; /**< Host threshold register. */ + uint8_t wthresh; /**< Write-back threshold reg. */ + uint32_t ctx_curr; /**< Hardware context states. */ + /** Hardware context0 history. */ + struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM]; +}; + + +#if 1 +#define RTE_PMD_USE_PREFETCH +#endif + +#ifdef RTE_PMD_USE_PREFETCH +/* + * Prefetch a cache line into all cache levels. + */ +#define rte_ixgbe_prefetch(p) rte_prefetch0(p) +#else +#define rte_ixgbe_prefetch(p) do {} while(0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while(0) +#endif + +/********************************************************************* + * + * TX functions + * + **********************************************************************/ +static inline void +ixgbe_set_xmit_ctx(struct igb_tx_queue* txq, + volatile struct ixgbe_adv_tx_context_desc *ctx_txd, + uint16_t ol_flags, uint32_t vlan_macip_lens) +{ + uint32_t type_tucmd_mlhl; + uint32_t mss_l4len_idx; + uint32_t ctx_idx; + uint32_t cmp_mask; + + ctx_idx = txq->ctx_curr; + cmp_mask = 0; + type_tucmd_mlhl = 0; + + if (ol_flags & PKT_TX_VLAN_PKT) { + cmp_mask |= TX_VLAN_CMP_MASK; + } + + if (ol_flags & PKT_TX_IP_CKSUM) { + type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4; + cmp_mask |= TX_MAC_LEN_CMP_MASK; + } + + /* Specify which HW CTX to upload. 
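+ * The index selects one of the two per-queue contexts, IXGBE_CTX_0 or
+ * IXGBE_CTX_1, and the same index is later echoed in the IDX field of the
+ * olinfo_status word of each offloaded packet's data descriptor (see
+ * ixgbe_xmit_pkts() below) so the hardware knows which cached context
+ * applies to that packet.
+ *
+ * Illustrative input: a packet asking for IPv4 + TCP checksum offload
+ * reaches this point with ol_flags containing
+ * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM and vlan_macip_lens encoding, say,
+ * l2_len = 14 and l3_len = 20, producing a context descriptor with
+ * IXGBE_ADVTXD_TUCMD_IPV4 and IXGBE_ADVTXD_TUCMD_L4T_TCP set.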
*/ + mss_l4len_idx = (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT); + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_UDP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + case PKT_TX_TCP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + case PKT_TX_SCTP_CKSUM: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT; + cmp_mask |= TX_MACIP_LEN_CMP_MASK; + break; + default: + type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV | + IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT; + break; + } + + txq->ctx_cache[ctx_idx].flags = ol_flags; + txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask; + txq->ctx_cache[ctx_idx].vlan_macip_lens = vlan_macip_lens & cmp_mask; + + ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl); + ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens); + ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx); + ctx_txd->seqnum_seed = 0; +} + +/* + * Check which hardware context can be used. Use the existing match + * or create a new context descriptor. + */ +static inline uint32_t +what_advctx_update(struct igb_tx_queue *txq, uint16_t flags, + uint32_t vlan_macip_lens) +{ + /* If match with the current used context */ + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens == + (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) { + return txq->ctx_curr; + } + + /* What if match with the next context */ + txq->ctx_curr ^= 1; + if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && + (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens == + (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) { + return txq->ctx_curr; + } + + /* Mismatch, use the previous context */ + return (IXGBE_CTX_NUM); +} + +static inline uint32_t +tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags) +{ + static const uint32_t l4_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_TXSM}; + static const uint32_t l3_olinfo[2] = {0, IXGBE_ADVTXD_POPTS_IXSM}; + uint32_t tmp; + + tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM]; + tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0]; + return tmp; +} + +static inline uint32_t +tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags) +{ + static const uint32_t vlan_cmd[2] = {0, IXGBE_ADVTXD_DCMD_VLE}; + return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0]; +} + +/* Default RS bit threshold values */ +#ifndef DEFAULT_TX_RS_THRESH +#define DEFAULT_TX_RS_THRESH 32 +#endif +#ifndef DEFAULT_TX_FREE_THRESH +#define DEFAULT_TX_FREE_THRESH 32 +#endif + +/* Reset transmit descriptors after they have been used */ +static inline int +ixgbe_xmit_cleanup(struct igb_tx_queue *txq) +{ + struct igb_tx_entry *sw_ring = txq->sw_ring; + volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring; + uint16_t last_desc_cleaned = txq->last_desc_cleaned; + uint16_t nb_tx_desc = txq->nb_tx_desc; + uint16_t desc_to_clean_to; + uint16_t nb_tx_to_clean; + + /* Determine the last descriptor needing to be cleaned */ + desc_to_clean_to = last_desc_cleaned + txq->tx_rs_thresh; + if (desc_to_clean_to >= nb_tx_desc) + desc_to_clean_to = desc_to_clean_to - nb_tx_desc; + + /* 
Check to make sure the last descriptor to clean is done */ + desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; + if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD)) + { + PMD_TX_FREE_LOG(DEBUG, + "TX descriptor %4u is not done" + "(port=%d queue=%d)", + desc_to_clean_to, + txq->port_id, txq->queue_id); + /* Failed to clean any descriptors, better luck next time */ + return -(1); + } + + /* Figure out how many descriptors will be cleaned */ + if (last_desc_cleaned > desc_to_clean_to) + nb_tx_to_clean = ((nb_tx_desc - last_desc_cleaned) + + desc_to_clean_to); + else + nb_tx_to_clean = desc_to_clean_to - last_desc_cleaned; + + PMD_TX_FREE_LOG(DEBUG, + "Cleaning %4u TX descriptors: %4u to %4u " + "(port=%d queue=%d)", + nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to, + txq->port_id, txq->queue_id); + + /* + * The last descriptor to clean is done, so that means all the + * descriptors from the last descriptor that was cleaned + * up to the last descriptor with the RS bit set + * are done. Only reset the threshold descriptor. + */ + txr[desc_to_clean_to].wb.status = 0; + + /* Update the txq to reflect the last descriptor that was cleaned */ + txq->last_desc_cleaned = desc_to_clean_to; + txq->nb_tx_free += nb_tx_to_clean; + + /* No Error */ + return (0); +} + +uint16_t +ixgbe_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct igb_tx_entry *sw_ring; + struct igb_tx_entry *txe, *txn; + volatile union ixgbe_adv_tx_desc *txr; + volatile union ixgbe_adv_tx_desc *txd; + struct rte_mbuf *tx_pkt; + struct rte_mbuf *m_seg; + uint64_t buf_dma_addr; + uint32_t olinfo_status; + uint32_t cmd_type_len; + uint32_t pkt_len; + uint16_t slen; + uint16_t ol_flags; + uint16_t tx_id; + uint16_t tx_last; + uint16_t nb_tx; + uint16_t nb_used; + uint16_t tx_ol_req; + uint32_t vlan_macip_lens; + uint32_t ctx; + uint32_t new_ctx; + + sw_ring = txq->sw_ring; + txr = txq->tx_ring; + tx_id = txq->tx_tail; + txe = &sw_ring[tx_id]; + + /* Determine if the descriptor ring needs to be cleaned. */ + if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) { + ixgbe_xmit_cleanup(txq); + } + + /* TX loop */ + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + new_ctx = 0; + tx_pkt = *tx_pkts++; + pkt_len = tx_pkt->pkt.pkt_len; + + RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf); + + /* + * Determine how many (if any) context descriptors + * are needed for offload functionality. + */ + ol_flags = tx_pkt->ol_flags; + vlan_macip_lens = tx_pkt->pkt.vlan_tci << 16 | + tx_pkt->pkt.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT | + tx_pkt->pkt.l3_len; + + /* If hardware offload required */ + tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK; + if (tx_ol_req) { + /* If new context need be built or reuse the exist ctx. */ + ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens); + /* Only allocate context descriptor if required*/ + new_ctx = (ctx == IXGBE_CTX_NUM); + ctx = txq->ctx_curr; + } + + /* + * Keep track of how many descriptors are used this loop + * This will always be the number of segments + the number of + * Context descriptors required to transmit the packet + */ + nb_used = tx_pkt->pkt.nb_segs + new_ctx; + + /* + * The number of descriptors that must be allocated for a + * packet is the number of segments of that packet, plus 1 + * Context Descriptor for the hardware offload, if any. + * Determine the last TX descriptor to allocate in the TX ring + * for the packet, starting from the current position (tx_id) + * in the ring. 
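+ *
+ * Worked example (numbers purely illustrative): with nb_tx_desc = 512,
+ * tx_id = 510 and a 3-segment packet that also needs a context
+ * descriptor, nb_used = 4, so tx_last = 510 + 4 - 1 = 513, which the
+ * wrap-around below turns into 513 - 512 = 1.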
+ */ + tx_last = (uint16_t) (tx_id + nb_used - 1); + + /* Circular ring */ + if (tx_last >= txq->nb_tx_desc) + tx_last = (uint16_t) (tx_last - txq->nb_tx_desc); + + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u" + " tx_first=%u tx_last=%u\n", + (unsigned) txq->port_id, + (unsigned) txq->queue_id, + (unsigned) pkt_len, + (unsigned) tx_id, + (unsigned) tx_last); + + /* + * Make sure there are enough TX descriptors available to + * transmit the entire packet. + * nb_used better be less than or equal to txq->tx_rs_thresh + */ + if (nb_used > txq->nb_tx_free) { + PMD_TX_FREE_LOG(DEBUG, + "Not enough free TX descriptors " + "nb_used=%4u nb_free=%4u " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->port_id, txq->queue_id); + + if (ixgbe_xmit_cleanup(txq) != 0) { + /* Could not clean any descriptors */ + if (nb_tx == 0) + return (0); + goto end_of_tx; + } + + /* nb_used better be <= txq->tx_rs_thresh */ + if (unlikely(nb_used > txq->tx_rs_thresh)) { + PMD_TX_FREE_LOG(DEBUG, + "The number of descriptors needed to " + "transmit the packet exceeds the " + "RS bit threshold. This will impact " + "performance." + "nb_used=%4u nb_free=%4u " + "tx_rs_thresh=%4u. " + "(port=%d queue=%d)", + nb_used, txq->nb_tx_free, + txq->tx_rs_thresh, + txq->port_id, txq->queue_id); + /* + * Loop here until there are enough TX + * descriptors or until the ring cannot be + * cleaned. + */ + while (nb_used > txq->nb_tx_free) { + if (ixgbe_xmit_cleanup(txq) != 0) { + /* + * Could not clean any + * descriptors + */ + if (nb_tx == 0) + return (0); + goto end_of_tx; + } + } + } + } + + /* + * By now there are enough free TX descriptors to transmit + * the packet. + */ + + /* + * Set common flags of all TX Data Descriptors. + * + * The following bits must be set in all Data Descriptors: + * - IXGBE_ADVTXD_DTYP_DATA + * - IXGBE_ADVTXD_DCMD_DEXT + * + * The following bits must be set in the first Data Descriptor + * and are ignored in the other ones: + * - IXGBE_ADVTXD_DCMD_IFCS + * - IXGBE_ADVTXD_MAC_1588 + * - IXGBE_ADVTXD_DCMD_VLE + * + * The following bits must only be set in the last Data + * Descriptor: + * - IXGBE_TXD_CMD_EOP + * + * The following bits can be set in any Data Descriptor, but + * are only set in the last Data Descriptor: + * - IXGBE_TXD_CMD_RS + */ + cmd_type_len = IXGBE_ADVTXD_DTYP_DATA | + IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; + olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT); +#ifdef RTE_LIBRTE_IEEE1588 + if (ol_flags & PKT_TX_IEEE1588_TMST) + cmd_type_len |= IXGBE_ADVTXD_MAC_1588; +#endif + + if (tx_ol_req) { + /* + * Setup the TX Advanced Context Descriptor if required + */ + if (new_ctx) { + volatile struct ixgbe_adv_tx_context_desc * + ctx_txd; + + ctx_txd = (volatile struct + ixgbe_adv_tx_context_desc *) + &txr[tx_id]; + + txn = &sw_ring[txe->next_id]; + RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf); + + if (txe->mbuf != NULL) { + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = NULL; + } + + ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, + vlan_macip_lens); + + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + } + + /* + * Setup the TX Advanced Data Descriptor, + * This path will go through + * whatever new/reuse the context descriptor + */ + cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags); + olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags); + olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT; + } + + m_seg = tx_pkt; + do { + txd = &txr[tx_id]; + txn = &sw_ring[txe->next_id]; + + if (txe->mbuf != NULL) + rte_pktmbuf_free_seg(txe->mbuf); + txe->mbuf = 
m_seg; + + /* + * Set up Transmit Data Descriptor. + */ + slen = m_seg->pkt.data_len; + buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg); + txd->read.buffer_addr = + rte_cpu_to_le_64(buf_dma_addr); + txd->read.cmd_type_len = + rte_cpu_to_le_32(cmd_type_len | slen); + txd->read.olinfo_status = + rte_cpu_to_le_32(olinfo_status); + txe->last_id = tx_last; + tx_id = txe->next_id; + txe = txn; + m_seg = m_seg->pkt.next; + } while (m_seg != NULL); + + /* + * The last packet data descriptor needs End Of Packet (EOP) + */ + cmd_type_len |= IXGBE_TXD_CMD_EOP; + txq->nb_tx_used += nb_used; + txq->nb_tx_free -= nb_used; + + /* Set RS bit only on threshold packets' last descriptor */ + if (txq->nb_tx_used >= txq->tx_rs_thresh) { + PMD_TX_FREE_LOG(DEBUG, + "Setting RS bit on TXD id=" + "%4u (port=%d queue=%d)", + tx_last, txq->port_id, txq->queue_id); + + cmd_type_len |= IXGBE_TXD_CMD_RS; + + /* Update txq RS bit counters */ + txq->nb_tx_used = 0; + } + txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len); + } +end_of_tx: + rte_wmb(); + + /* + * Set the Transmit Descriptor Tail (TDT) + */ + PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u", + (unsigned) txq->port_id, (unsigned) txq->queue_id, + (unsigned) tx_id, (unsigned) nb_tx); + IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id); + txq->tx_tail = tx_id; + + return (nb_tx); +} + +/********************************************************************* + * + * RX functions + * + **********************************************************************/ +static inline uint16_t +rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs) +{ + uint16_t pkt_flags; + + static uint16_t ip_pkt_types_map[16] = { + 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT, + PKT_RX_IPV6_HDR, 0, 0, 0, + PKT_RX_IPV6_HDR_EXT, 0, 0, 0, + PKT_RX_IPV6_HDR_EXT, 0, 0, 0, + }; + + static uint16_t ip_rss_types_map[16] = { + 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, 0, 0, + 0, 0, 0, PKT_RX_FDIR, + }; + +#ifdef RTE_LIBRTE_IEEE1588 + static uint32_t ip_pkt_etqf_map[8] = { + 0, 0, 0, PKT_RX_IEEE1588_PTP, + 0, 0, 0, 0, + }; + + pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? + ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] : + ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]); +#else + pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 : + ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]); + +#endif + return (pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]); +} + +static inline uint16_t +rx_desc_status_to_pkt_flags(uint32_t rx_status) +{ + uint16_t pkt_flags; + + /* + * Check if VLAN present only. + * Do not check whether L3/L4 rx checksum done by NIC or not, + * That can be found from rte_eth_rxmode.hw_ip_checksum flag + */ + pkt_flags = (uint16_t) (rx_status & IXGBE_RXD_STAT_VP) ? 
PKT_RX_VLAN_PKT : 0; + +#ifdef RTE_LIBRTE_IEEE1588 + if (rx_status & IXGBE_RXD_STAT_TMST) + pkt_flags = (pkt_flags | PKT_RX_IEEE1588_TMST); +#endif + return pkt_flags; +} + +static inline uint16_t +rx_desc_error_to_pkt_flags(uint32_t rx_status) +{ + /* + * Bit 31: IPE, IPv4 checksum error + * Bit 30: L4I, L4I integrity error + */ + static uint16_t error_to_pkt_flags_map[4] = { + 0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD, + PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD + }; + return error_to_pkt_flags_map[(rx_status >> + IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK]; +} + +uint16_t +ixgbe_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile union ixgbe_adv_rx_desc *rx_ring; + volatile union ixgbe_adv_rx_desc *rxdp; + struct igb_rx_entry *sw_ring; + struct igb_rx_entry *rxe; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union ixgbe_adv_rx_desc rxd; + uint64_t dma_addr; + uint32_t staterr; + uint32_t hlen_type_rss; + uint16_t pkt_len; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint16_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + while (nb_rx < nb_pkts) { + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + break; + rxd = *rxdp; + + /* + * End of packet. + * + * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet + * is likely to be invalid and to be dropped by the various + * validation checks performed by the network stack. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy do not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u " + "ext_err_stat=0x%08x pkt_len=%u\n", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u\n", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. 
*/
+ rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
+
+ /*
+ * When next RX descriptor is on a cache-line boundary,
+ * prefetch the next 4 RX descriptors and the next 8 pointers
+ * to mbufs.
+ */
+ if ((rx_id & 0x3) == 0) {
+ rte_ixgbe_prefetch(&rx_ring[rx_id]);
+ rte_ixgbe_prefetch(&sw_ring[rx_id]);
+ }
+
+ rxm = rxe->mbuf;
+ rxe->mbuf = nmb;
+ dma_addr =
+ rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+ rxdp->read.hdr_addr = dma_addr;
+ rxdp->read.pkt_addr = dma_addr;
+
+ /*
+ * Initialize the returned mbuf.
+ * 1) setup generic mbuf fields:
+ * - number of segments,
+ * - next segment,
+ * - packet length,
+ * - RX port identifier.
+ * 2) integrate hardware offload data, if any:
+ * - RSS flag & hash,
+ * - IP checksum flag,
+ * - VLAN TCI, if any,
+ * - error flags.
+ */
+ pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
+ rxq->crc_len);
+ rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+ rte_packet_prefetch(rxm->pkt.data);
+ rxm->pkt.nb_segs = 1;
+ rxm->pkt.next = NULL;
+ rxm->pkt.pkt_len = pkt_len;
+ rxm->pkt.data_len = pkt_len;
+ rxm->pkt.in_port = rxq->port_id;
+
+ hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+ /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+ rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+
+ pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+ pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
+ pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
+ rxm->ol_flags = pkt_flags;
+
+ if (likely(pkt_flags & PKT_RX_RSS_HASH))
+ rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+ else if (pkt_flags & PKT_RX_FDIR) {
+ rxm->pkt.hash.fdir.hash =
+ (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
+ & IXGBE_ATR_HASH_MASK);
+ rxm->pkt.hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+ }
+ /*
+ * Store the mbuf address into the next entry of the array
+ * of returned packets.
+ */
+ rx_pkts[nb_rx++] = rxm;
+ }
+ rxq->rx_tail = rx_id;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u\n",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1)); + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); + nb_hold = 0; + } + rxq->nb_rx_hold = nb_hold; + return (nb_rx); +} + +uint16_t +ixgbe_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + volatile union ixgbe_adv_rx_desc *rx_ring; + volatile union ixgbe_adv_rx_desc *rxdp; + struct igb_rx_entry *sw_ring; + struct igb_rx_entry *rxe; + struct rte_mbuf *first_seg; + struct rte_mbuf *last_seg; + struct rte_mbuf *rxm; + struct rte_mbuf *nmb; + union ixgbe_adv_rx_desc rxd; + uint64_t dma; /* Physical address of mbuf data buffer */ + uint32_t staterr; + uint32_t hlen_type_rss; + uint16_t rx_id; + uint16_t nb_rx; + uint16_t nb_hold; + uint16_t data_len; + uint16_t pkt_flags; + + nb_rx = 0; + nb_hold = 0; + rx_id = rxq->rx_tail; + rx_ring = rxq->rx_ring; + sw_ring = rxq->sw_ring; + + /* + * Retrieve RX context of current packet, if any. + */ + first_seg = rxq->pkt_first_seg; + last_seg = rxq->pkt_last_seg; + + while (nb_rx < nb_pkts) { + next_desc: + /* + * The order of operations here is important as the DD status + * bit must not be read after any other descriptor fields. + * rx_ring and rxdp are pointing to volatile data so the order + * of accesses cannot be reordered by the compiler. If they were + * not volatile, they could be reordered which could lead to + * using invalid descriptor fields when read from rxd. + */ + rxdp = &rx_ring[rx_id]; + staterr = rxdp->wb.upper.status_error; + if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + break; + rxd = *rxdp; + + /* + * Descriptor done. + * + * Allocate a new mbuf to replenish the RX ring descriptor. + * If the allocation fails: + * - arrange for that RX descriptor to be the first one + * being parsed the next time the receive function is + * invoked [on the same queue]. + * + * - Stop parsing the RX ring and return immediately. + * + * This policy does not drop the packet received in the RX + * descriptor for which the allocation of a new mbuf failed. + * Thus, it allows that packet to be later retrieved if + * mbuf have been freed in the mean time. + * As a side effect, holding RX descriptors instead of + * systematically giving them back to the NIC may lead to + * RX ring exhaustion situations. + * However, the NIC can gracefully prevent such situations + * to happen by sending specific "back-pressure" flow control + * frames to its peer(s). + */ + PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u " + "staterr=0x%x data_len=%u\n", + (unsigned) rxq->port_id, (unsigned) rxq->queue_id, + (unsigned) rx_id, (unsigned) staterr, + (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); + + nmb = rte_rxmbuf_alloc(rxq->mb_pool); + if (nmb == NULL) { + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " + "queue_id=%u\n", (unsigned) rxq->port_id, + (unsigned) rxq->queue_id); + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++; + break; + } + + nb_hold++; + rxe = &sw_ring[rx_id]; + rx_id++; + if (rx_id == rxq->nb_rx_desc) + rx_id = 0; + + /* Prefetch next mbuf while processing current one. */ + rte_ixgbe_prefetch(sw_ring[rx_id].mbuf); + + /* + * When next RX descriptor is on a cache-line boundary, + * prefetch the next 4 RX descriptors and the next 8 pointers + * to mbufs. + */ + if ((rx_id & 0x3) == 0) { + rte_ixgbe_prefetch(&rx_ring[rx_id]); + rte_ixgbe_prefetch(&sw_ring[rx_id]); + } + + /* + * Update RX descriptor with the physical address of the new + * data buffer of the new allocated mbuf. 
+ */ + rxm = rxe->mbuf; + rxe->mbuf = nmb; + dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb)); + rxdp->read.hdr_addr = dma; + rxdp->read.pkt_addr = dma; + + /* + * Set data length & data buffer address of mbuf. + */ + data_len = rte_le_to_cpu_16(rxd.wb.upper.length); + rxm->pkt.data_len = data_len; + rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM; + + /* + * If this is the first buffer of the received packet, + * set the pointer to the first mbuf of the packet and + * initialize its context. + * Otherwise, update the total length and the number of segments + * of the current scattered packet, and update the pointer to + * the last mbuf of the current packet. + */ + if (first_seg == NULL) { + first_seg = rxm; + first_seg->pkt.pkt_len = data_len; + first_seg->pkt.nb_segs = 1; + } else { + first_seg->pkt.pkt_len = (uint16_t)(first_seg->pkt.pkt_len + + data_len); + first_seg->pkt.nb_segs++; + last_seg->pkt.next = rxm; + } + + /* + * If this is not the last buffer of the received packet, + * update the pointer to the last mbuf of the current scattered + * packet and continue to parse the RX ring. + */ + if (! (staterr & IXGBE_RXDADV_STAT_EOP)) { + last_seg = rxm; + goto next_desc; + } + + /* + * This is the last buffer of the received packet. + * If the CRC is not stripped by the hardware: + * - Subtract the CRC length from the total packet length. + * - If the last buffer only contains the whole CRC or a part + * of it, free the mbuf associated to the last buffer. + * If part of the CRC is also contained in the previous + * mbuf, subtract the length of that CRC part from the + * data length of the previous mbuf. + */ + rxm->pkt.next = NULL; + if (unlikely(rxq->crc_len > 0)) { + first_seg->pkt.pkt_len -= ETHER_CRC_LEN; + if (data_len <= ETHER_CRC_LEN) { + rte_pktmbuf_free_seg(rxm); + first_seg->pkt.nb_segs--; + last_seg->pkt.data_len = (uint16_t) + (last_seg->pkt.data_len - + (ETHER_CRC_LEN - data_len)); + last_seg->pkt.next = NULL; + } else + rxm->pkt.data_len = + (uint16_t) (data_len - ETHER_CRC_LEN); + } + + /* + * Initialize the first mbuf of the returned packet: + * - RX port identifier, + * - hardware offload data, if any: + * - RSS flag & hash, + * - IP checksum flag, + * - VLAN TCI, if any, + * - error flags. + */ + first_seg->pkt.in_port = rxq->port_id; + + /* + * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is + * set in the pkt_flags field. + */ + first_seg->pkt.vlan_tci = + rte_le_to_cpu_16(rxd.wb.upper.vlan); + hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data); + pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss); + pkt_flags = (pkt_flags | + rx_desc_status_to_pkt_flags(staterr)); + pkt_flags = (pkt_flags | + rx_desc_error_to_pkt_flags(staterr)); + first_seg->ol_flags = pkt_flags; + + if (likely(pkt_flags & PKT_RX_RSS_HASH)) + first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss; + else if (pkt_flags & PKT_RX_FDIR) { + first_seg->pkt.hash.fdir.hash = + (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum) + & IXGBE_ATR_HASH_MASK); + first_seg->pkt.hash.fdir.id = + rxd.wb.lower.hi_dword.csum_ip.ip_id; + } + + /* Prefetch data of first segment, if configured to do so. */ + rte_packet_prefetch(first_seg->pkt.data); + + /* + * Store the mbuf address into the next entry of the array + * of returned packets. + */ + rx_pkts[nb_rx++] = first_seg; + + /* + * Setup receipt context for a new packet. + */ + first_seg = NULL; + } + + /* + * Record index of the next RX descriptor to probe. 
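+ * (rx_tail is only the software consumer position; the hardware tail
+ * register RDT is written separately at the end of this function, and
+ * only once more than rx_free_thresh descriptors have been refilled, so
+ * with an illustrative rx_free_thresh of 64 the tail register is touched
+ * roughly once every 65 received buffers.)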
+ */
+ rxq->rx_tail = rx_id;
+
+ /*
+ * Save receive context.
+ */
+ rxq->pkt_first_seg = first_seg;
+ rxq->pkt_last_seg = last_seg;
+
+ /*
+ * If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register.
+ * Update the RDT with the value of the last processed RX descriptor
+ * minus 1, to guarantee that the RDT register is never equal to the
+ * RDH register, which creates a "full" ring situation from the
+ * hardware point of view...
+ */
+ nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+ if (nb_hold > rxq->rx_free_thresh) {
+ PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+ "nb_hold=%u nb_rx=%u\n",
+ (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+ (unsigned) rx_id, (unsigned) nb_hold,
+ (unsigned) nb_rx);
+ rx_id = (uint16_t) ((rx_id == 0) ?
+ (rxq->nb_rx_desc - 1) : (rx_id - 1));
+ IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+ nb_hold = 0;
+ }
+ rxq->nb_rx_hold = nb_hold;
+ return (nb_rx);
+}
+
+/*********************************************************************
+ *
+ * Queue management functions
+ *
+ **********************************************************************/
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
+ * also optimize cache line size effect. H/W supports up to cache line size 128.
+ */
+#define IXGBE_ALIGN 128
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define IXGBE_MIN_RING_DESC 64
+#define IXGBE_MAX_RING_DESC 4096
+
+/*
+ * Create memzone for HW rings. malloc can't be used as the physical address is
+ * needed. If the memzone is already created, then this function returns a ptr
+ * to the old one.
+ */ +static const struct rte_memzone * +ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, + uint16_t queue_id, uint32_t ring_size, int socket_id) +{ + char z_name[RTE_MEMZONE_NAMESIZE]; + const struct rte_memzone *mz; + + rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", + dev->driver->pci_drv.name, ring_name, + dev->data->port_id, queue_id); + + mz = rte_memzone_lookup(z_name); + if (mz) + return mz; + + return rte_memzone_reserve_aligned(z_name, (uint64_t) ring_size, + socket_id, 0, IXGBE_ALIGN); +} + +static void +ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq) +{ + unsigned i; + + if (txq->sw_ring != NULL) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf); + txq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +ixgbe_tx_queue_release(struct igb_tx_queue *txq) +{ + ixgbe_tx_queue_release_mbufs(txq); + rte_free(txq->sw_ring); + rte_free(txq); +} + +int +ixgbe_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues) +{ + uint16_t old_nb_queues = dev->data->nb_tx_queues; + struct igb_tx_queue **txq; + unsigned i; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->tx_queues == NULL) { + dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues", + sizeof(struct igb_tx_queue *) * nb_queues, + CACHE_LINE_SIZE); + if (dev->data->tx_queues == NULL) { + dev->data->nb_tx_queues = 0; + return -1; + } + } + else { + for (i = nb_queues; i < old_nb_queues; i++) + ixgbe_tx_queue_release(dev->data->tx_queues[i]); + txq = rte_realloc(dev->data->tx_queues, + sizeof(struct igb_tx_queue *) * nb_queues, + CACHE_LINE_SIZE); + if (txq == NULL) + return -1; + else + dev->data->tx_queues = txq; + if (nb_queues > old_nb_queues) + memset(&dev->data->tx_queues[old_nb_queues], 0, + sizeof(struct igb_tx_queue *) * + (nb_queues - old_nb_queues)); + } + dev->data->nb_tx_queues = nb_queues; + return 0; +} + +/* (Re)set dynamic igb_tx_queue fields to defaults */ +static void +ixgbe_reset_tx_queue(struct igb_tx_queue *txq) +{ + struct igb_tx_entry *txe = txq->sw_ring; + uint16_t prev, i; + + /* Zero out HW ring memory */ + for (i = 0; i < sizeof(union ixgbe_adv_tx_desc) * txq->nb_tx_desc; i++) { + ((volatile char *)txq->tx_ring)[i] = 0; + } + + /* Initialize SW ring entries */ + prev = (uint16_t) (txq->nb_tx_desc - 1); + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; + txd->wb.status = IXGBE_TXD_STAT_DD; + txe[i].mbuf = NULL; + txe[i].last_id = i; + txe[prev].next_id = i; + prev = i; + } + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + /* + * Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); + txq->ctx_curr = 0; + memset((void*)&txq->ctx_cache, 0, + IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); +} + +int +ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + const struct rte_memzone *tz; + struct igb_tx_queue *txq; + struct ixgbe_hw *hw; + uint16_t tx_rs_thresh, tx_free_thresh; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate number of transmit descriptors. + * It must not exceed hardware maximum, and must be multiple + * of IXGBE_ALIGN. 
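+ * (More precisely, nb_desc * sizeof(union ixgbe_adv_tx_desc) must be a
+ * multiple of IXGBE_ALIGN; with 16-byte descriptors that means nb_desc
+ * must be a multiple of 8 and lie within [IXGBE_MIN_RING_DESC,
+ * IXGBE_MAX_RING_DESC], so e.g. 512 is accepted while 100 is rejected.)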
+ */
+ if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
+     (nb_desc > IXGBE_MAX_RING_DESC) ||
+     (nb_desc < IXGBE_MIN_RING_DESC)) {
+ return -EINVAL;
+ }
+
+ /*
+ * The following two parameters control the setting of the RS bit on
+ * transmit descriptors.
+ * TX descriptors will have their RS bit set after txq->tx_rs_thresh
+ * descriptors have been used.
+ * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+ * descriptors are used or if the number of descriptors required
+ * to transmit a packet is greater than the number of free TX
+ * descriptors.
+ * The following constraints must be satisfied:
+ *  - tx_rs_thresh must be greater than 0.
+ *  - tx_rs_thresh must be less than the size of the ring minus 2.
+ *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
+ *  - tx_free_thresh must be greater than 0.
+ *  - tx_free_thresh must be less than the size of the ring minus 3.
+ * One descriptor in the TX ring is used as a sentinel to avoid a
+ * H/W race condition, hence the maximum threshold constraints.
+ * When a threshold is set to zero, the default value is used instead.
+ */
+ tx_rs_thresh = (tx_conf->tx_rs_thresh) ?
+ tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH;
+ tx_free_thresh = (tx_conf->tx_free_thresh) ?
+ tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+ if (tx_rs_thresh >= (nb_desc - 2)) {
+ RTE_LOG(ERR, PMD,
+ "tx_rs_thresh must be less than the "
+ "number of TX descriptors minus 2. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ tx_rs_thresh, dev->data->port_id, queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_free_thresh >= (nb_desc - 3)) {
+ RTE_LOG(ERR, PMD,
+ "tx_free_thresh must be less than the "
+ "number of TX descriptors minus 3. "
+ "(tx_free_thresh=%u port=%d queue=%d)",
+ tx_free_thresh, dev->data->port_id, queue_idx);
+ return -(EINVAL);
+ }
+ if (tx_rs_thresh > tx_free_thresh) {
+ RTE_LOG(ERR, PMD,
+ "tx_rs_thresh must be less than or equal to "
+ "tx_free_thresh. "
+ "(tx_free_thresh=%u tx_rs_thresh=%u "
+ "port=%d queue=%d)",
+ tx_free_thresh, tx_rs_thresh,
+ dev->data->port_id, queue_idx);
+ return -(EINVAL);
+ }
+
+ /*
+ * If tx_rs_thresh is greater than 1, then TX WTHRESH must be set to 0.
+ * If WTHRESH is greater than zero, the RS bit is ignored
+ * by the NIC and all descriptors are written back after the NIC
+ * accumulates WTHRESH descriptors.
+ */
+ if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+ RTE_LOG(ERR, PMD,
+ "TX WTHRESH must be set to 0 if "
+ "tx_rs_thresh is greater than 1. "
+ "(tx_rs_thresh=%u port=%d queue=%d)",
+ tx_rs_thresh,
+ dev->data->port_id, queue_idx);
+ return -(EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->tx_queues[queue_idx] != NULL)
+ ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
+ CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return (-ENOMEM);
+
+ /*
+ * Allocate TX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
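+ *
+ * With IXGBE_MAX_RING_DESC = 4096 and (assuming) 16-byte descriptors this
+ * reserves roughly 64 KB per TX queue up front, so a later setup call with
+ * a larger nb_desc can reuse the same zone instead of failing.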
+ */ + tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, + sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC, + socket_id); + if (tz == NULL) { + ixgbe_tx_queue_release(txq); + return (-ENOMEM); + } + + txq->nb_tx_desc = nb_desc; + txq->tx_rs_thresh = tx_rs_thresh; + txq->tx_free_thresh = tx_free_thresh; + txq->pthresh = tx_conf->tx_thresh.pthresh; + txq->hthresh = tx_conf->tx_thresh.hthresh; + txq->wthresh = tx_conf->tx_thresh.wthresh; + txq->queue_id = queue_idx; + txq->port_id = dev->data->port_id; + + /* + * Modification to set VFTDT for virtual function if vf is detected + */ + if (hw->mac.type == ixgbe_mac_82599_vf) + txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx)); + else + txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(queue_idx)); + + txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr; + txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr; + + /* Allocate software ring */ + txq->sw_ring = rte_zmalloc("txq->sw_ring", + sizeof(struct igb_tx_entry) * nb_desc, + CACHE_LINE_SIZE); + if (txq->sw_ring == NULL) { + ixgbe_tx_queue_release(txq); + return (-ENOMEM); + } + PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n", + txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr); + + ixgbe_reset_tx_queue(txq); + + dev->data->tx_queues[queue_idx] = txq; + + dev->tx_pkt_burst = ixgbe_xmit_pkts; + + return (0); +} + +static void +ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq) +{ + unsigned i; + + if (rxq->sw_ring != NULL) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_ring[i].mbuf != NULL) { + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->sw_ring[i].mbuf = NULL; + } + } + } +} + +static void +ixgbe_rx_queue_release(struct igb_rx_queue *rxq) +{ + ixgbe_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_ring); + rte_free(rxq); +} + +int +ixgbe_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues) +{ + uint16_t old_nb_queues = dev->data->nb_rx_queues; + struct igb_rx_queue **rxq; + unsigned i; + + PMD_INIT_FUNC_TRACE(); + + if (dev->data->rx_queues == NULL) { + dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues", + sizeof(struct igb_rx_queue *) * nb_queues, + CACHE_LINE_SIZE); + if (dev->data->rx_queues == NULL) { + dev->data->nb_rx_queues = 0; + return -ENOMEM; + } + } + else { + for (i = nb_queues; i < old_nb_queues; i++) + ixgbe_rx_queue_release(dev->data->rx_queues[i]); + rxq = rte_realloc(dev->data->rx_queues, + sizeof(struct igb_rx_queue *) * nb_queues, + CACHE_LINE_SIZE); + if (rxq == NULL) + return -ENOMEM; + else + dev->data->rx_queues = rxq; + if (nb_queues > old_nb_queues) + memset(&dev->data->rx_queues[old_nb_queues], 0, + sizeof(struct igb_rx_queue *) * + (nb_queues - old_nb_queues)); + } + dev->data->nb_rx_queues = nb_queues; + return 0; +} + +/* (Re)set dynamic igb_rx_queue fields to defaults */ +static void +ixgbe_reset_rx_queue(struct igb_rx_queue *rxq) +{ + unsigned i; + + /* Zero out HW ring memory */ + for (i = 0; i < rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc); i++) { + ((volatile char *)rxq->rx_ring)[i] = 0; + } + + rxq->rx_tail = 0; + rxq->nb_rx_hold = 0; + rxq->pkt_first_seg = NULL; + rxq->pkt_last_seg = NULL; +} + +int +ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + const struct rte_memzone *rz; + struct igb_rx_queue *rxq; + struct ixgbe_hw *hw; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Validate 
number of receive descriptors.
+ * It must not exceed the hardware maximum, and must be a multiple
+ * of IXGBE_ALIGN.
+ */
+ if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
+     (nb_desc > IXGBE_MAX_RING_DESC) ||
+     (nb_desc < IXGBE_MIN_RING_DESC)) {
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed... */
+ if (dev->data->rx_queues[queue_idx] != NULL)
+ ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
+
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
+ CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return (-ENOMEM);
+ rxq->mb_pool = mp;
+ rxq->nb_rx_desc = nb_desc;
+ rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
+ ETHER_CRC_LEN);
+
+ /*
+ * Allocate RX ring hardware descriptors. A memzone large enough to
+ * handle the maximum ring size is allocated in order to allow for
+ * resizing in later calls to the queue setup function.
+ */
+ rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ IXGBE_MAX_RING_DESC * sizeof(union ixgbe_adv_rx_desc),
+ socket_id);
+ if (rz == NULL) {
+ ixgbe_rx_queue_release(rxq);
+ return (-ENOMEM);
+ }
+ /*
+ * Modification to set up VFRDT for the virtual function, if one is detected.
+ */
+ if (hw->mac.type == ixgbe_mac_82599_vf)
+ rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
+ else
+ rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(queue_idx));
+
+ rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+ rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;
+
+ /* Allocate software ring */
+ rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
+ sizeof(struct igb_rx_entry) * nb_desc,
+ CACHE_LINE_SIZE);
+ if (rxq->sw_ring == NULL) {
+ ixgbe_rx_queue_release(rxq);
+ return (-ENOMEM);
+ }
+ PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+ rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+
+ dev->data->rx_queues[queue_idx] = rxq;
+
+ ixgbe_reset_rx_queue(rxq);
+
+ return 0;
+}
+
+void
+ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ unsigned i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct igb_tx_queue *txq = dev->data->tx_queues[i];
+ ixgbe_tx_queue_release_mbufs(txq);
+ ixgbe_reset_tx_queue(txq);
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+ ixgbe_rx_queue_release_mbufs(rxq);
+ ixgbe_reset_rx_queue(rxq);
+ }
+}
+
+/*********************************************************************
+ *
+ *  Device RX/TX init functions
+ *
+ **********************************************************************/
+
+/**
+ * Receive Side Scaling (RSS)
+ * See section 7.1.2.8 in the following document:
+ * "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source
+ * and destination ports of TCP/UDP headers, if any, of received packets are
+ * hashed against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index in which to store the
+ * received packets.
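+ * For example, with 4 RX queues configured, the code below fills the RETA
+ * with the repeating pattern 0,1,2,3,0,1,2,3,... so that the 128 possible
+ * hash-LSB values are spread round-robin across the 4 queues.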
+ * The following output is supplied in the RX write-back descriptor: + * - 32-bit result of the Microsoft RSS hash function, + * - 4-bit RSS type field. + */ + +/* + * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet. + * Used as the default key. + */ +static uint8_t rss_intel_key[40] = { + 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, + 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0, + 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4, + 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, + 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA, +}; + +static void +ixgbe_rss_disable(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + uint32_t mrqc; + + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + mrqc &= ~IXGBE_MRQC_RSSEN; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); +} + +static void +ixgbe_rss_configure(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + uint8_t *hash_key; + uint32_t rss_key; + uint32_t mrqc; + uint32_t reta; + uint16_t rss_hf; + uint16_t i; + uint16_t j; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; + if (rss_hf == 0) { /* Disable RSS */ + ixgbe_rss_disable(dev); + return; + } + hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key; + if (hash_key == NULL) + hash_key = rss_intel_key; /* Default hash key */ + + /* Fill in RSS hash key */ + for (i = 0; i < 10; i++) { + rss_key = hash_key[(i * 4)]; + rss_key |= hash_key[(i * 4) + 1] << 8; + rss_key |= hash_key[(i * 4) + 2] << 16; + rss_key |= hash_key[(i * 4) + 3] << 24; + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key); + } + + /* Fill in redirection table */ + reta = 0; + for (i = 0, j = 0; i < 128; i++, j++) { + if (j == dev->data->nb_rx_queues) j = 0; + reta = (reta << 8) | j; + if ((i & 3) == 3) + IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), rte_bswap32(reta)); + } + + /* Set configured hashing functions in MRQC register */ + mrqc = IXGBE_MRQC_RSSEN; /* RSS enable */ + if (rss_hf & ETH_RSS_IPV4) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4; + if (rss_hf & ETH_RSS_IPV4_TCP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP; + if (rss_hf & ETH_RSS_IPV6) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6; + if (rss_hf & ETH_RSS_IPV6_EX) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX; + if (rss_hf & ETH_RSS_IPV6_TCP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + if (rss_hf & ETH_RSS_IPV6_TCP_EX) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP; + if (rss_hf & ETH_RSS_IPV4_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + if (rss_hf & ETH_RSS_IPV6_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + if (rss_hf & ETH_RSS_IPV6_UDP_EX) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); +} + +#define NUM_VFTA_REGISTERS 128 +#define NIC_RX_BUFFER_SIZE 0x200 + +static void +ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_vmdq_dcb_conf *cfg; + struct ixgbe_hw *hw; + enum rte_eth_nb_pools num_pools; + uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl; + uint16_t pbsize; + uint8_t nb_tcs; /* number of traffic classes */ + int i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; + num_pools = cfg->nb_queue_pools; + /* Check we have a valid number of pools */ + if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) { + ixgbe_rss_disable(dev); + return; + } + /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */ + nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / 
(int)num_pools); + + /* + * RXPBSIZE + * split rx buffer up into sections, each for 1 traffic class + */ + pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs); + for (i = 0 ; i < nb_tcs; i++) { + uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); + /* clear 10 bits. */ + rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); + } + /* zero alloc all unused TCs */ + for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { + uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT )); + /* clear 10 bits. */ + IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); + } + + /* MRQC: enable vmdq and dcb */ + mrqc = ((num_pools == ETH_16_POOLS) ? \ + IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN ); + IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + + /* PFVTCTL: turn on virtualisation and set the default pool */ + vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN; + if (cfg->enable_default_pool) { + vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT); + } else { + vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL; + } + IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl); + + /* RTRUP2TC: mapping user priorities to traffic classes (TCs) */ + queue_mapping = 0; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) + /* + * mapping is done with 3 bits per priority, + * so shift by i*3 each time + */ + queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3)); + + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping); + + /* RTRPCS: DCB related */ + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM); + + /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + + /* VFTA - enable all vlan filters */ + for (i = 0; i < NUM_VFTA_REGISTERS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); + } + + /* VFRE: pool enabling for receive - 16 or 32 */ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \ + num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); + + /* + * MPSAR - allow pools to read specific mac addresses + * In this case, all pools should be able to read from mac addr 0 + */ + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF); + + /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ + for (i = 0; i < cfg->nb_pool_maps; i++) { + /* set vlan id in VF register and set the valid bit */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \ + (cfg->pool_map[i].vlan_id & 0xFFF))); + /* + * Put the allowed pools in VFB reg. As we only have 16 or 32 + * pools, we only need to use the first half of the register + * i.e. 
bits 0-31 + */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools); + } +} + +static int +ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq) +{ + struct igb_rx_entry *rxe = rxq->sw_ring; + uint64_t dma_addr; + unsigned i; + + /* Initialize software ring entries */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + volatile union ixgbe_adv_rx_desc *rxd; + struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n", + (unsigned) rxq->queue_id); + return (-ENOMEM); + } + dma_addr = + rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf)); + rxd = &rxq->rx_ring[i]; + rxd->read.hdr_addr = dma_addr; + rxd->read.pkt_addr = dma_addr; + rxe[i].mbuf = mbuf; + } + + return 0; +} + +/* + * Initializes Receive Unit. + */ +int +ixgbe_dev_rx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct igb_rx_queue *rxq; + struct rte_pktmbuf_pool_private *mbp_priv; + uint64_t bus_addr; + uint32_t rxctrl; + uint32_t fctrl; + uint32_t hlreg0; + uint32_t maxfrs; + uint32_t srrctl; + uint32_t rdrxctl; + uint32_t rxcsum; + uint16_t buf_size; + uint16_t i; + int ret; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* + * Make sure receives are disabled while setting + * up the RX context (registers, descriptor rings, etc.). + */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); + + /* Enable receipt of broadcasted frames */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; + fctrl |= IXGBE_FCTRL_PMCF; + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + + /* + * Configure CRC stripping, if any. + */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + if (dev->data->dev_conf.rxmode.hw_strip_crc) + hlreg0 |= IXGBE_HLREG0_RXCRCSTRP; + else + hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP; + + /* + * Configure jumbo frame support, if any. + */ + if (dev->data->dev_conf.rxmode.jumbo_frame == 1) { + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + maxfrs &= 0x0000FFFF; + maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16); + IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs); + } else + hlreg0 &= ~IXGBE_HLREG0_JUMBOEN; + + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* Setup RX queues */ + dev->rx_pkt_burst = ixgbe_recv_pkts; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings */ + ret = ixgbe_alloc_rx_queue_mbufs(rxq); + if (ret) { + ixgbe_dev_clear_queues(dev); + return ret; + } + + /* + * Reset crc_len in case it was changed after queue setup by a + * call to configure. + */ + rxq->crc_len = (uint8_t) + ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 
0 : + ETHER_CRC_LEN); + + /* Setup the Base and Length of the Rx Descriptor Rings */ + bus_addr = rxq->rx_ring_phys_addr; + IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i), + rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0); + + /* Configure the SRRCTL register */ +#ifdef RTE_HEADER_SPLIT_ENABLE + /* + * Configure Header Split + */ + if (dev->data->dev_conf.rxmode.header_split) { + if (hw->mac.type == ixgbe_mac_82599EB) { + /* Must setup the PSRTYPE register */ + uint32_t psrtype; + psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_IPV6HDR; + IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), psrtype); + } + srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK); + srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else +#endif + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* + * Configure the RX buffer size in the BSIZEPACKET field of + * the SRRCTL register of the queue. + * The value is in 1 KB resolution. Valid values can be from + * 1 KB to 16 KB. + */ + mbp_priv = (struct rte_pktmbuf_pool_private *) + ((char *)rxq->mb_pool + sizeof(struct rte_mempool)); + buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size - + RTE_PKTMBUF_HEADROOM); + srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) & + IXGBE_SRRCTL_BSIZEPKT_MASK); + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl); + + buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << + IXGBE_SRRCTL_BSIZEPKT_SHIFT); + if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){ + dev->data->scattered_rx = 1; + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts; + } + } + + /* + * Configure RSS if device configured with multiple RX queues. + */ + if (hw->mac.type == ixgbe_mac_82599EB) { + if (dev->data->nb_rx_queues > 1) + switch (dev->data->dev_conf.rxmode.mq_mode) { + case ETH_RSS: + ixgbe_rss_configure(dev); + break; + + case ETH_VMDQ_DCB: + ixgbe_vmdq_dcb_configure(dev); + break; + + default: ixgbe_rss_disable(dev); + } + else + ixgbe_rss_disable(dev); + } + + /* + * Setup the Checksum Register. + * Disable Full-Packet Checksum which is mutually exclusive with RSS. + * Enable IP/L4 checkum computation by hardware if requested to do so. + */ + rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); + rxcsum |= IXGBE_RXCSUM_PCSD; + if (dev->data->dev_conf.rxmode.hw_ip_checksum) + rxcsum |= IXGBE_RXCSUM_IPPCSE; + else + rxcsum &= ~IXGBE_RXCSUM_IPPCSE; + + IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + if (hw->mac.type == ixgbe_mac_82599EB) { + rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + if (dev->data->dev_conf.rxmode.hw_strip_crc) + rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; + else + rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP; + rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; + IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl); + } + + return 0; +} + +/* + * Initializes Transmit Unit. 
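+ *
+ * Programs TDBAL/TDBAH/TDLEN and resets TDH/TDT for every configured TX
+ * queue, clears the Tx head write-back relaxed-ordering bit, and on
+ * non-82598 devices programs MTQC with the TX arbiter temporarily disabled.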
+ */ +void +ixgbe_dev_tx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct igb_tx_queue *txq; + uint64_t bus_addr; + uint32_t hlreg0; + uint32_t txctrl; + uint32_t rttdcs; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Enable TX CRC (checksum offload requirement) */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 |= IXGBE_HLREG0_TXCRCEN; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + + /* Setup the Base and Length of the Tx Descriptor Rings */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + + bus_addr = txq->tx_ring_phys_addr; + IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), + txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc)); + /* Setup the HW Tx Head and TX Tail descriptor pointers */ + IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0); + + /* + * Disable Tx Head Writeback RO bit, since this hoses + * bookkeeping if things aren't delivered in order. + */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + txctrl = IXGBE_READ_REG(hw, + IXGBE_DCA_TXCTRL(i)); + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), + txctrl); + break; + + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + default: + txctrl = IXGBE_READ_REG(hw, + IXGBE_DCA_TXCTRL_82599(i)); + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), + txctrl); + break; + } + } + + if (hw->mac.type != ixgbe_mac_82598EB) { + /* disable arbiter before setting MTQC */ + rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS); + rttdcs |= IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB); + + /* re-enable arbiter */ + rttdcs &= ~IXGBE_RTTDCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + } +} + +/* + * Start Transmit and Receive Units. 
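+ *
+ * Writes the pthresh/hthresh/wthresh values into TXDCTL, sets the queue
+ * enable bits in TXDCTL/RXDCTL and polls each for up to roughly 10 ms,
+ * publishes the initial RDT value, and finally enables the RX DMA engine.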
+ */ +void +ixgbe_dev_rxtx_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct igb_tx_queue *txq; + struct igb_rx_queue *rxq; + uint32_t txdctl; + uint32_t dmatxctl; + uint32_t rxdctl; + uint32_t rxctrl; + uint16_t i; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Setup Transmit Threshold Registers */ + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + txdctl |= txq->pthresh & 0x7F; + txdctl |= ((txq->hthresh & 0x7F) << 8); + txdctl |= ((txq->wthresh & 0x7F) << 16); + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl); + } + + if (hw->mac.type != ixgbe_mac_82598EB) { + dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + dmatxctl |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl); + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl); + + /* Wait until TX Enable ready */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = 10; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable " + "Tx Queue %d\n", i); + } + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl); + + /* Wait until RX Enable ready */ + poll_ms = 10; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable " + "Rx Queue %d\n", i); + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_RDT(i), rxq->nb_rx_desc - 1); + } + + /* Enable Receive engine */ + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; + rxctrl |= IXGBE_RXCTRL_RXEN; + hw->mac.ops.enable_rx_dma(hw, rxctrl); +} + + +/* + * [VF] Initializes Receive Unit. 
+ */ +int +ixgbevf_dev_rx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct igb_rx_queue *rxq; + struct rte_pktmbuf_pool_private *mbp_priv; + uint64_t bus_addr; + uint32_t srrctl; + uint16_t buf_size; + uint16_t i; + int ret; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup RX queues */ + dev->rx_pkt_burst = ixgbe_recv_pkts; + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + + /* Allocate buffers for descriptor rings */ + ret = ixgbe_alloc_rx_queue_mbufs(rxq); + if (ret){ + return -1; + } + /* Setup the Base and Length of the Rx Descriptor Rings */ + bus_addr = rxq->rx_ring_phys_addr; + + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i), + rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc)); + IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0); + + + /* Configure the SRRCTL register */ +#ifdef RTE_HEADER_SPLIT_ENABLE + /* + * Configure Header Split + */ + if (dev->data->dev_conf.rxmode.header_split) { + + /* Must setup the PSRTYPE register */ + uint32_t psrtype; + psrtype = IXGBE_PSRTYPE_TCPHDR | + IXGBE_PSRTYPE_UDPHDR | + IXGBE_PSRTYPE_IPV4HDR | + IXGBE_PSRTYPE_IPV6HDR; + + IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE(i), psrtype); + + srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size << + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & + IXGBE_SRRCTL_BSIZEHDR_MASK); + srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } else +#endif + srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; + + /* + * Configure the RX buffer size in the BSIZEPACKET field of + * the SRRCTL register of the queue. + * The value is in 1 KB resolution. Valid values can be from + * 1 KB to 16 KB. + */ + mbp_priv = (struct rte_pktmbuf_pool_private *) + ((char *)rxq->mb_pool + sizeof(struct rte_mempool)); + buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size - + RTE_PKTMBUF_HEADROOM); + srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) & + IXGBE_SRRCTL_BSIZEPKT_MASK); + + /* + * VF modification to write virtual function SRRCTL register + */ + IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl); + + buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) << + IXGBE_SRRCTL_BSIZEPKT_SHIFT); + if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){ + dev->data->scattered_rx = 1; + dev->rx_pkt_burst = ixgbe_recv_scattered_pkts; + } + } + return 0; +} + +/* + * [VF] Initializes Transmit Unit. + */ +void +ixgbevf_dev_tx_init(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct igb_tx_queue *txq; + uint64_t bus_addr; + uint32_t txctrl; + uint16_t i; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Setup the Base and Length of the Tx Descriptor Rings */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + bus_addr = txq->tx_ring_phys_addr; + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i), + (uint32_t)(bus_addr & 0x00000000ffffffffULL)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), + (uint32_t)(bus_addr >> 32)); + IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i), + txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc)); + /* Setup the HW Tx Head and TX Tail descriptor pointers */ + IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0); + + /* + * Disable Tx Head Writeback RO bit, since this hoses + * bookkeeping if things aren't delivered in order. 
+ */ + txctrl = IXGBE_READ_REG(hw, + IXGBE_VFDCA_TXCTRL(i)); + txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; + IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), + txctrl); + } +} + +/* + * [VF] Start Transmit and Receive Units. + */ +void +ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw; + struct igb_tx_queue *txq; + struct igb_rx_queue *rxq; + uint32_t txdctl; + uint32_t rxdctl; + uint16_t i; + int poll_ms; + + PMD_INIT_FUNC_TRACE(); + hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + /* Setup Transmit Threshold Registers */ + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= txq->pthresh & 0x7F; + txdctl |= ((txq->hthresh & 0x7F) << 8); + txdctl |= ((txq->wthresh & 0x7F) << 16); + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + txdctl |= IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl); + + poll_ms = 10; + /* Wait until TX Enable ready */ + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i)); + } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable " + "Tx Queue %d\n", i); + } + for (i = 0; i < dev->data->nb_rx_queues; i++) { + + rxq = dev->data->rx_queues[i]; + + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl); + + /* Wait until RX Enable ready */ + poll_ms = 10; + do { + rte_delay_ms(1); + rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)); + } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not enable " + "Rx Queue %d\n", i); + rte_wmb(); + IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1); + + } +} diff --git a/lib/librte_ring/Makefile b/lib/librte_ring/Makefile new file mode 100644 index 0000000000..e77301e6f1 --- /dev/null +++ b/lib/librte_ring/Makefile @@ -0,0 +1,50 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_ring.a + +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3 + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_RING) := rte_ring.c + +# install includes +SYMLINK-$(CONFIG_RTE_LIBRTE_RING)-include := rte_ring.h + +# this lib needs eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_RING) += lib/librte_eal + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c new file mode 100644 index 0000000000..3eb0d5e567 --- /dev/null +++ b/lib/librte_ring/rte_ring.c @@ -0,0 +1,283 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Derived from FreeBSD's bufring.c + * + ************************************************************************** + * + * Copyright (c) 2007,2008 Kip Macy kmacy@freebsd.org + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. The name of Kip Macy nor the names of other + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + ***************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_ring.h" + +TAILQ_HEAD(rte_ring_list, rte_ring); + +/* global list of ring (used for debug/dump) */ +static struct rte_ring_list *ring_list = NULL; + +/* true if x is a power of 2 */ +#define POWEROF2(x) ((((x)-1) & (x)) == 0) + +/* create the ring */ +struct rte_ring * +rte_ring_create(const char *name, unsigned count, int socket_id, + unsigned flags) +{ + char mz_name[RTE_MEMZONE_NAMESIZE]; + struct rte_ring *r; + const struct rte_memzone *mz; + size_t ring_size; + int mz_flags = 0; + + /* compilation-time checks */ + RTE_BUILD_BUG_ON((sizeof(struct rte_ring) & + CACHE_LINE_MASK) != 0); + RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) & + CACHE_LINE_MASK) != 0); + RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) & + CACHE_LINE_MASK) != 0); +#ifdef RTE_LIBRTE_RING_DEBUG + RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) & + CACHE_LINE_MASK) != 0); + RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) & + CACHE_LINE_MASK) != 0); +#endif + + /* check that we have an initialised tail queue */ + if (ring_list == NULL) + if ((ring_list = RTE_TAILQ_RESERVE("RTE_RING", rte_ring_list)) == NULL){ + rte_errno = E_RTE_NO_TAILQ; + return NULL; + } + + /* count must be a power of 2 */ + if (!POWEROF2(count)) { + rte_errno = EINVAL; + RTE_LOG(ERR, RING, "Requested size is not a power of 2\n"); + return NULL; + } + + rte_snprintf(mz_name, sizeof(mz_name), "RG_%s", name); + ring_size = count * sizeof(void *) + sizeof(struct rte_ring); + + /* reserve a memory zone for this ring. If we can't get rte_config or + * we are secondary process, the memzone_reserve function will set + * rte_errno for us appropriately - hence no check in this this function */ + mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags); + if (mz == NULL) { + RTE_LOG(ERR, RING, "Cannot reserve memory\n"); + return NULL; + } + + r = mz->addr; + + /* init the ring structure */ + memset(r, 0, sizeof(*r)); + rte_snprintf(r->name, sizeof(r->name), "%s", name); + r->flags = flags; + r->prod.bulk_default = r->cons.bulk_default = 1; + r->prod.watermark = count; + r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ); + r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ); + r->prod.size = r->cons.size = count; + r->prod.mask = r->cons.mask = count-1; + r->prod.head = r->cons.head = 0; + r->prod.tail = r->cons.tail = 0; + + TAILQ_INSERT_TAIL(ring_list, r, next); + return r; +} + +/* + * change the high water mark. 
If *count* is 0, water marking is + * disabled + */ +int +rte_ring_set_water_mark(struct rte_ring *r, unsigned count) +{ + if (count >= r->prod.size) + return -EINVAL; + + /* if count is 0, disable the watermarking */ + if (count == 0) + count = r->prod.size; + + r->prod.watermark = count; + return 0; +} + +/* dump the status of the ring on the console */ +void +rte_ring_dump(const struct rte_ring *r) +{ +#ifdef RTE_LIBRTE_RING_DEBUG + struct rte_ring_debug_stats sum; + unsigned lcore_id; +#endif + + printf("ring <%s>@%p\n", r->name, r); + printf(" flags=%x\n", r->flags); + printf(" size=%"PRIu32"\n", r->prod.size); + printf(" ct=%"PRIu32"\n", r->cons.tail); + printf(" ch=%"PRIu32"\n", r->cons.head); + printf(" pt=%"PRIu32"\n", r->prod.tail); + printf(" ph=%"PRIu32"\n", r->prod.head); + printf(" used=%u\n", rte_ring_count(r)); + printf(" avail=%u\n", rte_ring_free_count(r)); + if (r->prod.watermark == r->prod.size) + printf(" watermark=0\n"); + else + printf(" watermark=%"PRIu32"\n", r->prod.watermark); + printf(" bulk_default=%"PRIu32"\n", r->prod.bulk_default); + + /* sum and dump statistics */ +#ifdef RTE_LIBRTE_RING_DEBUG + memset(&sum, 0, sizeof(sum)); + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk; + sum.enq_success_objs += r->stats[lcore_id].enq_success_objs; + sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk; + sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs; + sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk; + sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs; + sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk; + sum.deq_success_objs += r->stats[lcore_id].deq_success_objs; + sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk; + sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs; + } + printf(" size=%"PRIu32"\n", r->prod.size); + printf(" enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk); + printf(" enq_success_objs=%"PRIu64"\n", sum.enq_success_objs); + printf(" enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk); + printf(" enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs); + printf(" enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk); + printf(" enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs); + printf(" deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk); + printf(" deq_success_objs=%"PRIu64"\n", sum.deq_success_objs); + printf(" deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk); + printf(" deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs); +#else + printf(" no statistics available\n"); +#endif +} + +/* dump the status of all rings on the console */ +void +rte_ring_list_dump(void) +{ + const struct rte_ring *mp; + + /* check that we have an initialised tail queue */ + if (ring_list == NULL) + if ((ring_list = RTE_TAILQ_RESERVE("RTE_RING", rte_ring_list)) == NULL){ + rte_errno = E_RTE_NO_TAILQ; + return; + } + + TAILQ_FOREACH(mp, ring_list, next) { + rte_ring_dump(mp); + } +} + +/* search a ring from its name */ +struct rte_ring * +rte_ring_lookup(const char *name) +{ + struct rte_ring *r; + + /* check that we have an initialised tail queue */ + if (ring_list == NULL) + if ((ring_list = RTE_TAILQ_RESERVE("RTE_RING", rte_ring_list)) == NULL){ + rte_errno = E_RTE_NO_TAILQ; + return NULL; + } + + TAILQ_FOREACH(r, ring_list, next) { + if (strncmp(name, r->name, RTE_RING_NAMESIZE) == 0) + break; + } + + if (r == NULL) + rte_errno = ENOENT; + + return r; +} diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h new file mode 100644 index 0000000000..4086c78caf --- 
/dev/null +++ b/lib/librte_ring/rte_ring.h @@ -0,0 +1,830 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Derived from FreeBSD's bufring.h + * + ************************************************************************** + * + * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. The name of Kip Macy nor the names of other + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + ***************************************************************************/ + +#ifndef _RTE_RING_H_ +#define _RTE_RING_H_ + +/** + * @file + * RTE Ring + * + * The Ring Manager is a fixed-size queue, implemented as a table of + * pointers. 
Head and tail pointers are modified atomically, allowing + * concurrent access to it. It has the following features: + * + * - FIFO (First In First Out) + * - Maximum size is fixed; the pointers are stored in a table. + * - Lockless implementation. + * - Multi- or single-consumer dequeue. + * - Multi- or single-producer enqueue. + * - Bulk dequeue. + * - Bulk enqueue. + * + * Note: the ring implementation is not preemptable. A lcore must not + * be interrupted by another task that uses the same ring. + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifdef RTE_LIBRTE_RING_DEBUG +/** + * A structure that stores the ring statistics (per-lcore). + */ +struct rte_ring_debug_stats { + uint64_t enq_success_bulk; /**< Successful enqueues number. */ + uint64_t enq_success_objs; /**< Objects successfully enqueued. */ + uint64_t enq_quota_bulk; /**< Successful enqueues above watermark. */ + uint64_t enq_quota_objs; /**< Objects enqueued above watermark. */ + uint64_t enq_fail_bulk; /**< Failed enqueues number. */ + uint64_t enq_fail_objs; /**< Objects that failed to be enqueued. */ + uint64_t deq_success_bulk; /**< Successful dequeues number. */ + uint64_t deq_success_objs; /**< Objects successfully dequeued. */ + uint64_t deq_fail_bulk; /**< Failed dequeues number. */ + uint64_t deq_fail_objs; /**< Objects that failed to be dequeued. */ +} __rte_cache_aligned; +#endif + +#define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */ + +/** + * An RTE ring structure. + * + * The producer and the consumer have a head and a tail index. The particularity + * of these index is that they are not between 0 and size(ring). These indexes + * are between 0 and 2^32, and we mask their value when we access the ring[] + * field. Thanks to this assumption, we can do subtractions between 2 index + * values in a modulo-32bit base: that's why the overflow of the indexes is not + * a problem. + */ +struct rte_ring { + TAILQ_ENTRY(rte_ring) next; /**< Next in list. */ + + char name[RTE_RING_NAMESIZE]; /**< Name of the ring. */ + int flags; /**< Flags supplied at creation. */ + + /** Ring producer status. */ + struct prod { + volatile uint32_t bulk_default; /**< Default bulk count. */ + uint32_t watermark; /**< Maximum items before EDQUOT. */ + uint32_t sp_enqueue; /**< True, if single producer. */ + uint32_t size; /**< Size of ring. */ + uint32_t mask; /**< Mask (size-1) of ring. */ + volatile uint32_t head; /**< Producer head. */ + volatile uint32_t tail; /**< Producer tail. */ + } prod __rte_cache_aligned; + + /** Ring consumer status. */ + struct cons { + volatile uint32_t bulk_default; /**< Default bulk count. */ + uint32_t sc_dequeue; /**< True, if single consumer. */ + uint32_t size; /**< Size of the ring. */ + uint32_t mask; /**< Mask (size-1) of ring. */ + volatile uint32_t head; /**< Consumer head. */ + volatile uint32_t tail; /**< Consumer tail. */ + } cons __rte_cache_aligned; + + +#ifdef RTE_LIBRTE_RING_DEBUG + struct rte_ring_debug_stats stats[RTE_MAX_LCORE]; +#endif + + void * volatile ring[0] \ + __rte_cache_aligned; /**< Memory space of ring starts here. */ +}; + +#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */ +#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */ + +/** + * When debug is enabled, store ring statistics. + * @param r + * A pointer to the ring. + * @param name + * The name of the statistics field to increment in the ring. 
+ * @param n
+ *   The number to add to the object-oriented statistics.
+ */
+#ifdef RTE_LIBRTE_RING_DEBUG
+#define __RING_STAT_ADD(r, name, n) do { \
+ unsigned __lcore_id = rte_lcore_id(); \
+ r->stats[__lcore_id].name##_objs += n; \
+ r->stats[__lcore_id].name##_bulk += 1; \
+ } while(0)
+#else
+#define __RING_STAT_ADD(r, name, n) do {} while(0)
+#endif
+
+/**
+ * Create a new ring named *name* in memory.
+ *
+ * This function uses ``memzone_reserve()`` to allocate memory. Its size is
+ * set to *count*, which must be a power of two. Water marking is
+ * disabled by default. The default bulk count is initialized to 1.
+ * Note that the real usable ring size is *count-1* instead of
+ * *count*.
+ *
+ * @param name
+ *   The name of the ring.
+ * @param count
+ *   The size of the ring (must be a power of 2).
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in case of
+ *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ *   constraint for the reserved zone.
+ * @param flags
+ *   An OR of the following:
+ *   - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ *     using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *     is "single-producer". Otherwise, it is "multi-producers".
+ *   - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ *     using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *     is "single-consumer". Otherwise, it is "multi-consumers".
+ * @return
+ *   On success, the pointer to the newly allocated ring. NULL on error with
+ *   rte_errno set appropriately. Possible errno values include:
+ *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *   - E_RTE_SECONDARY - function was called from a secondary process instance
+ *   - E_RTE_NO_TAILQ - no tailq list could be got for the ring list
+ *   - EINVAL - count provided is not a power of 2
+ *   - ENOSPC - the maximum number of memzones has already been allocated
+ *   - EEXIST - a memzone with the same name already exists
+ *   - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_ring *rte_ring_create(const char *name, unsigned count,
+ int socket_id, unsigned flags);
+
+/**
+ * Set the default bulk count for enqueue/dequeue.
+ *
+ * The parameter *count* is the default number of bulk elements to
+ * get/put when using ``rte_ring_*_{en,de}queue_bulk()``. It must be
+ * greater than 0 and less than half of the ring size.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param count
+ *   The new default bulk count.
+ * @return
+ *   - 0: Success; default_bulk_count changed.
+ *   - -EINVAL: Invalid count value.
+ */
+static inline int
+rte_ring_set_bulk_count(struct rte_ring *r, unsigned count)
+{
+ if (unlikely(count == 0 || count >= r->prod.size))
+ return -EINVAL;
+
+ r->prod.bulk_default = r->cons.bulk_default = count;
+ return 0;
+}
+
+/**
+ * Get the default bulk count for enqueue/dequeue.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @return
+ *   The default bulk count for enqueue/dequeue.
+ */
+static inline unsigned
+rte_ring_get_bulk_count(struct rte_ring *r)
+{
+ return r->prod.bulk_default;
+}
+
+/**
+ * Change the high water mark.
+ *
+ * If *count* is 0, water marking is disabled. Otherwise, it is set to the
+ * *count* value. The *count* value must be greater than 0 and less
+ * than the ring size.
+ *
+ * This function can be called at any time (not necessarily at
+ * initialization).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param count
+ *   The new water mark value.
+ * @return + * - 0: Success; water mark changed. + * - -EINVAL: Invalid water mark value. + */ +int rte_ring_set_water_mark(struct rte_ring *r, unsigned count); + +/** + * Dump the status of the ring to the console. + * + * @param r + * A pointer to the ring structure. + */ +void rte_ring_dump(const struct rte_ring *r); + +/** + * Enqueue several objects on the ring (multi-producers safe). + * + * This function uses a "compare and set" instruction to move the + * producer index atomically. + * + * @param r + * A pointer to the ring structure. + * @param obj_table + * A pointer to a table of void * pointers (objects). + * @param n + * The number of objects to add in the ring from the obj_table. The + * value must be strictly positive. + * @return + * - 0: Success; objects enqueue. + * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the + * high water mark is exceeded. + * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued. + */ +static inline int +rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table, + unsigned n) +{ + uint32_t prod_head, prod_next; + uint32_t cons_tail, free_entries; + int success; + unsigned i; + uint32_t mask = r->prod.mask; + int ret; + + /* move prod.head atomically */ + do { + prod_head = r->prod.head; + cons_tail = r->cons.tail; + /* The subtraction is done between two unsigned 32bits value + * (the result is always modulo 32 bits even if we have + * prod_head > cons_tail). So 'free_entries' is always between 0 + * and size(ring)-1. */ + free_entries = (mask + cons_tail - prod_head); + + /* check that we have enough room in ring */ + if (unlikely(n > free_entries)) { + __RING_STAT_ADD(r, enq_fail, n); + return -ENOBUFS; + } + + prod_next = prod_head + n; + success = rte_atomic32_cmpset(&r->prod.head, prod_head, + prod_next); + } while (unlikely(success == 0)); + + /* write entries in ring */ + for (i = 0; likely(i < n); i++) + r->ring[(prod_head + i) & mask] = obj_table[i]; + rte_wmb(); + + /* return -EDQUOT if we exceed the watermark */ + if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) { + ret = -EDQUOT; + __RING_STAT_ADD(r, enq_quota, n); + } + else { + ret = 0; + __RING_STAT_ADD(r, enq_success, n); + } + + /* + * If there are other enqueues in progress that preceeded us, + * we need to wait for them to complete + */ + while (unlikely(r->prod.tail != prod_head)) + rte_pause(); + + r->prod.tail = prod_next; + return ret; +} + +/** + * Enqueue several objects on a ring (NOT multi-producers safe). + * + * @param r + * A pointer to the ring structure. + * @param obj_table + * A pointer to a table of void * pointers (objects). + * @param n + * The number of objects to add in the ring from the obj_table. The + * value must be strictly positive. + * @return + * - 0: Success; objects enqueued. + * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the + * high water mark is exceeded. + * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued. + */ +static inline int +rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table, + unsigned n) +{ + uint32_t prod_head, cons_tail; + uint32_t prod_next, free_entries; + unsigned i; + uint32_t mask = r->prod.mask; + int ret; + + prod_head = r->prod.head; + cons_tail = r->cons.tail; + /* The subtraction is done between two unsigned 32bits value + * (the result is always modulo 32 bits even if we have + * prod_head > cons_tail). So 'free_entries' is always between 0 + * and size(ring)-1. 
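+ * A worked example with illustrative values: for a ring of size 8
+ * (mask = 7) holding 3 objects after the indexes have wrapped, e.g.
+ * cons_tail = 0xFFFFFFFE and prod_head = 0x00000001, the expression
+ * 7 + 0xFFFFFFFE - 1 evaluates to 4 modulo 2^32, i.e. 4 free slots.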
*/ + free_entries = mask + cons_tail - prod_head; + + /* check that we have enough room in ring */ + if (unlikely(n > free_entries)) { + __RING_STAT_ADD(r, enq_fail, n); + return -ENOBUFS; + } + + prod_next = prod_head + n; + r->prod.head = prod_next; + + /* write entries in ring */ + for (i = 0; likely(i < n); i++) + r->ring[(prod_head + i) & mask] = obj_table[i]; + rte_wmb(); + + /* return -EDQUOT if we exceed the watermark */ + if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) { + ret = -EDQUOT; + __RING_STAT_ADD(r, enq_quota, n); + } + else { + ret = 0; + __RING_STAT_ADD(r, enq_success, n); + } + + r->prod.tail = prod_next; + return ret; +} + +/** + * Enqueue several objects on a ring. + * + * This function calls the multi-producer or the single-producer + * version depending on the default behavior that was specified at + * ring creation time (see flags). + * + * @param r + * A pointer to the ring structure. + * @param obj_table + * A pointer to a table of void * pointers (objects). + * @param n + * The number of objects to add in the ring from the obj_table. + * @return + * - 0: Success; objects enqueued. + * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the + * high water mark is exceeded. + * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued. + */ +static inline int +rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table, + unsigned n) +{ + if (r->prod.sp_enqueue) + return rte_ring_sp_enqueue_bulk(r, obj_table, n); + else + return rte_ring_mp_enqueue_bulk(r, obj_table, n); +} + +/** + * Enqueue one object on a ring (multi-producers safe). + * + * This function uses a "compare and set" instruction to move the + * producer index atomically. + * + * @param r + * A pointer to the ring structure. + * @param obj + * A pointer to the object to be added. + * @return + * - 0: Success; objects enqueued. + * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the + * high water mark is exceeded. + * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued. + */ +static inline int +rte_ring_mp_enqueue(struct rte_ring *r, void *obj) +{ + return rte_ring_mp_enqueue_bulk(r, &obj, 1); +} + +/** + * Enqueue one object on a ring (NOT multi-producers safe). + * + * @param r + * A pointer to the ring structure. + * @param obj + * A pointer to the object to be added. + * @return + * - 0: Success; objects enqueued. + * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the + * high water mark is exceeded. + * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued. + */ +static inline int +rte_ring_sp_enqueue(struct rte_ring *r, void *obj) +{ + return rte_ring_sp_enqueue_bulk(r, &obj, 1); +} + +/** + * Enqueue one object on a ring. + * + * This function calls the multi-producer or the single-producer + * version, depending on the default behaviour that was specified at + * ring creation time (see flags). + * + * @param r + * A pointer to the ring structure. + * @param obj + * A pointer to the object to be added. + * @return + * - 0: Success; objects enqueued. + * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the + * high water mark is exceeded. + * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued. 
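+ *
+ * A minimal usage sketch, assuming *r* was created with rte_ring_create()
+ * and *obj* points to a valid object:
+ *
+ *   int ret = rte_ring_enqueue(r, obj);
+ *   if (ret == -ENOBUFS)
+ *       ... the ring was full, obj was not enqueued ...
+ *   else if (ret == -EDQUOT)
+ *       ... obj was enqueued, but the water mark is now exceeded ...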
+ */ +static inline int +rte_ring_enqueue(struct rte_ring *r, void *obj) +{ + if (r->prod.sp_enqueue) + return rte_ring_sp_enqueue(r, obj); + else + return rte_ring_mp_enqueue(r, obj); +} + +/** + * Dequeue several objects from a ring (multi-consumers safe). + * + * This function uses a "compare and set" instruction to move the + * consumer index atomically. + * + * @param r + * A pointer to the ring structure. + * @param obj_table + * A pointer to a table of void * pointers (objects) that will be filled. + * @param n + * The number of objects to dequeue from the ring to the obj_table, + * must be strictly positive + * @return + * - 0: Success; objects dequeued. + * - -ENOENT: Not enough entries in the ring to dequeue; no object is + * dequeued. + */ +static inline int +rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n) +{ + uint32_t cons_head, prod_tail; + uint32_t cons_next, entries; + int success; + unsigned i; + uint32_t mask = r->prod.mask; + + /* move cons.head atomically */ + do { + cons_head = r->cons.head; + prod_tail = r->prod.tail; + /* The subtraction is done between two unsigned 32bits value + * (the result is always modulo 32 bits even if we have + * cons_head > prod_tail). So 'entries' is always between 0 + * and size(ring)-1. */ + entries = (prod_tail - cons_head); + + /* check that we have enough entries in ring */ + if (unlikely(n > entries)) { + __RING_STAT_ADD(r, deq_fail, n); + return -ENOENT; + } + + cons_next = cons_head + n; + success = rte_atomic32_cmpset(&r->cons.head, cons_head, + cons_next); + } while (unlikely(success == 0)); + + /* copy in table */ + rte_rmb(); + for (i = 0; likely(i < n); i++) { + obj_table[i] = r->ring[(cons_head + i) & mask]; + } + + /* + * If there are other dequeues in progress that preceeded us, + * we need to wait for them to complete + */ + while (unlikely(r->cons.tail != cons_head)) + rte_pause(); + + __RING_STAT_ADD(r, deq_success, n); + r->cons.tail = cons_next; + return 0; +} + +/** + * Dequeue several objects from a ring (NOT multi-consumers safe). + * + * @param r + * A pointer to the ring structure. + * @param obj_table + * A pointer to a table of void * pointers (objects) that will be filled. + * @param n + * The number of objects to dequeue from the ring to the obj_table, + * must be strictly positive. + * @return + * - 0: Success; objects dequeued. + * - -ENOENT: Not enough entries in the ring to dequeue; no object is + * dequeued. + */ +static inline int +rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n) +{ + uint32_t cons_head, prod_tail; + uint32_t cons_next, entries; + unsigned i; + uint32_t mask = r->prod.mask; + + cons_head = r->cons.head; + prod_tail = r->prod.tail; + /* The subtraction is done between two unsigned 32bits value + * (the result is always modulo 32 bits even if we have + * cons_head > prod_tail). So 'entries' is always between 0 + * and size(ring)-1. */ + entries = prod_tail - cons_head; + + /* check that we have enough entries in ring */ + if (unlikely(n > entries)) { + __RING_STAT_ADD(r, deq_fail, n); + return -ENOENT; + } + + cons_next = cons_head + n; + r->cons.head = cons_next; + + /* copy in table */ + rte_rmb(); + for (i = 0; likely(i < n); i++) { + obj_table[i] = r->ring[(cons_head + i) & mask]; + } + + __RING_STAT_ADD(r, deq_success, n); + r->cons.tail = cons_next; + return 0; +} + +/** + * Dequeue several objects from a ring. 
+ * + * This function calls the multi-consumers or the single-consumer + * version, depending on the default behaviour that was specified at + * ring creation time (see flags). + * + * @param r + * A pointer to the ring structure. + * @param obj_table + * A pointer to a table of void * pointers (objects) that will be filled. + * @param n + * The number of objects to dequeue from the ring to the obj_table. + * @return + * - 0: Success; objects dequeued. + * - -ENOENT: Not enough entries in the ring to dequeue, no object is + * dequeued. + */ +static inline int +rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n) +{ + if (r->cons.sc_dequeue) + return rte_ring_sc_dequeue_bulk(r, obj_table, n); + else + return rte_ring_mc_dequeue_bulk(r, obj_table, n); +} + +/** + * Dequeue one object from a ring (multi-consumers safe). + * + * This function uses a "compare and set" instruction to move the + * consumer index atomically. + * + * @param r + * A pointer to the ring structure. + * @param obj_p + * A pointer to a void * pointer (object) that will be filled. + * @return + * - 0: Success; objects dequeued. + * - -ENOENT: Not enough entries in the ring to dequeue; no object is + * dequeued. + */ +static inline int +rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p) +{ + return rte_ring_mc_dequeue_bulk(r, obj_p, 1); +} + +/** + * Dequeue one object from a ring (NOT multi-consumers safe). + * + * @param r + * A pointer to the ring structure. + * @param obj_p + * A pointer to a void * pointer (object) that will be filled. + * @return + * - 0: Success; objects dequeued. + * - -ENOENT: Not enough entries in the ring to dequeue, no object is + * dequeued. + */ +static inline int +rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p) +{ + return rte_ring_sc_dequeue_bulk(r, obj_p, 1); +} + +/** + * Dequeue one object from a ring. + * + * This function calls the multi-consumers or the single-consumer + * version depending on the default behaviour that was specified at + * ring creation time (see flags). + * + * @param r + * A pointer to the ring structure. + * @param obj_p + * A pointer to a void * pointer (object) that will be filled. + * @return + * - 0: Success, objects dequeued. + * - -ENOENT: Not enough entries in the ring to dequeue, no object is + * dequeued. + */ +static inline int +rte_ring_dequeue(struct rte_ring *r, void **obj_p) +{ + if (r->cons.sc_dequeue) + return rte_ring_sc_dequeue(r, obj_p); + else + return rte_ring_mc_dequeue(r, obj_p); +} + +/** + * Test if a ring is full. + * + * @param r + * A pointer to the ring structure. + * @return + * - 1: The ring is full. + * - 0: The ring is not full. + */ +static inline int +rte_ring_full(const struct rte_ring *r) +{ + uint32_t prod_tail = r->prod.tail; + uint32_t cons_tail = r->cons.tail; + return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0); +} + +/** + * Test if a ring is empty. + * + * @param r + * A pointer to the ring structure. + * @return + * - 1: The ring is empty. + * - 0: The ring is not empty. + */ +static inline int +rte_ring_empty(const struct rte_ring *r) +{ + uint32_t prod_tail = r->prod.tail; + uint32_t cons_tail = r->cons.tail; + return !!(cons_tail == prod_tail); +} + +/** + * Return the number of entries in a ring. + * + * @param r + * A pointer to the ring structure. + * @return + * The number of entries in the ring. 
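+ *   Note that prod.tail and cons.tail are read without synchronization, so
+ *   while other lcores are enqueuing or dequeuing the returned value is only
+ *   an instantaneous approximation.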
+ */ +static inline unsigned +rte_ring_count(const struct rte_ring *r) +{ + uint32_t prod_tail = r->prod.tail; + uint32_t cons_tail = r->cons.tail; + return ((prod_tail - cons_tail) & r->prod.mask); +} + +/** + * Return the number of free entries in a ring. + * + * @param r + * A pointer to the ring structure. + * @return + * The number of free entries in the ring. + */ +static inline unsigned +rte_ring_free_count(const struct rte_ring *r) +{ + uint32_t prod_tail = r->prod.tail; + uint32_t cons_tail = r->cons.tail; + return ((cons_tail - prod_tail - 1) & r->prod.mask); +} + +/** + * Dump the status of all rings on the console + */ +void rte_ring_list_dump(void); + +/** + * Search a ring from its name + * + * @param name + * The name of the ring. + * @return + * The pointer to the ring matching the name, or NULL if not found, + * with rte_errno set appropriately. Possible rte_errno values include: + * - ENOENT - required entry not available to return. + */ +struct rte_ring *rte_ring_lookup(const char *name); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_RING_H_ */ diff --git a/lib/librte_timer/Makefile b/lib/librte_timer/Makefile new file mode 100644 index 0000000000..155a96045a --- /dev/null +++ b/lib/librte_timer/Makefile @@ -0,0 +1,50 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
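Before the timer library files below, here is how the ring API above fits together in practice. This is a minimal, hypothetical sketch, not part of the DPDK sources: the ring name, sizes, water mark and error handling are illustrative only.

    #include <rte_ring.h>
    #include <rte_errno.h>

    #define MY_RING_SIZE 1024                 /* power of two; 1023 slots usable */

    static struct rte_ring *r;

    static int setup_ring(void)
    {
            /* single-producer / single-consumer ring, no NUMA constraint */
            r = rte_ring_create("my_ring", MY_RING_SIZE, SOCKET_ID_ANY,
                                RING_F_SP_ENQ | RING_F_SC_DEQ);
            if (r == NULL)
                    return -rte_errno;        /* EEXIST, ENOMEM, EINVAL, ... */

            /* ask for an -EDQUOT warning once the ring is 3/4 full */
            return rte_ring_set_water_mark(r, (MY_RING_SIZE * 3) / 4);
    }

    static void produce(void *objs[], unsigned n)
    {
            int ret = rte_ring_enqueue_bulk(r, objs, n);
            if (ret == -ENOBUFS)
                    ;   /* nothing was enqueued: drop or retry later */
            else if (ret == -EDQUOT)
                    ;   /* enqueued, but the water mark is exceeded */
    }

    static void consume(void *objs[], unsigned n)
    {
            if (rte_ring_dequeue_bulk(r, objs, n) == -ENOENT)
                    ;   /* fewer than n entries available: nothing dequeued */
    }

The same pair of enqueue/dequeue calls works unchanged with several producer or consumer lcores; only the flags passed to rte_ring_create() change.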
+# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_timer.a + +CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) + +# all source are stored in SRCS-y +SRCS-$(CONFIG_RTE_LIBRTE_TIMER) := rte_timer.c + +# install this header file +SYMLINK-$(CONFIG_RTE_LIBRTE_TIMER)-include := rte_timer.h + +# this lib needs eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_TIMER) += lib/librte_eal + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c new file mode 100644 index 0000000000..a944beee48 --- /dev/null +++ b/lib/librte_timer/rte_timer.c @@ -0,0 +1,506 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rte_timer.h" + +LIST_HEAD(rte_timer_list, rte_timer); + +struct priv_timer { + struct rte_timer_list pending; /**< list of pending timers */ + struct rte_timer_list expired; /**< list of expired timers */ + struct rte_timer_list done; /**< list of done timers */ + rte_spinlock_t list_lock; /**< lock to protect list access */ + + /** per-core variable that true if a timer was updated on this + * core since last reset of the variable */ + int updated; + + unsigned prev_lcore; /**< used for lcore round robin */ + +#ifdef RTE_LIBRTE_TIMER_DEBUG + /** per-lcore statistics */ + struct rte_timer_debug_stats stats; +#endif +} __rte_cache_aligned; + +/** per-lcore private info for timers */ +static struct priv_timer priv_timer[RTE_MAX_LCORE]; + +/* when debug is enabled, store some statistics */ +#ifdef RTE_LIBRTE_TIMER_DEBUG +#define __TIMER_STAT_ADD(name, n) do { \ + unsigned __lcore_id = rte_lcore_id(); \ + priv_timer[__lcore_id].stats.name += (n); \ + } while(0) +#else +#define __TIMER_STAT_ADD(name, n) do {} while(0) +#endif + +/* this macro allow to modify var while browsing the list */ +#define LIST_FOREACH_SAFE(var, var2, head, field) \ + for ((var) = ((head)->lh_first), \ + (var2) = ((var) ? ((var)->field.le_next) : NULL); \ + (var); \ + (var) = (var2), \ + (var2) = ((var) ? ((var)->field.le_next) : NULL)) + + +/* Init the timer library. */ +void +rte_timer_subsystem_init(void) +{ + unsigned lcore_id; + + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++) { + LIST_INIT(&priv_timer[lcore_id].pending); + LIST_INIT(&priv_timer[lcore_id].expired); + LIST_INIT(&priv_timer[lcore_id].done); + rte_spinlock_init(&priv_timer[lcore_id].list_lock); + priv_timer[lcore_id].prev_lcore = lcore_id; + } +} + +/* Initialize the timer handle tim for use */ +void +rte_timer_init(struct rte_timer *tim) +{ + union rte_timer_status status; + + status.state = RTE_TIMER_STOP; + status.owner = RTE_TIMER_NO_OWNER; + tim->status.u32 = status.u32; +} + +/* + * if timer is pending or stopped (or running on the same core than + * us), mark timer as configuring, and on success return the previous + * status of the timer + */ +static int +timer_set_config_state(struct rte_timer *tim, + union rte_timer_status *ret_prev_status) +{ + union rte_timer_status prev_status, status; + int success = 0; + unsigned lcore_id; + + lcore_id = rte_lcore_id(); + + /* wait that the timer is in correct status before update, + * and mark it as beeing configured */ + while (success == 0) { + prev_status.u32 = tim->status.u32; + + /* timer is running on another core, exit */ + if (prev_status.state == RTE_TIMER_RUNNING && + (unsigned)prev_status.owner != lcore_id) + return -1; + + /* timer is beeing configured on another core */ + if (prev_status.state == RTE_TIMER_CONFIG) + return -1; + + /* here, we know that timer is stopped or pending, + * mark it atomically as beeing configured */ + status.state = RTE_TIMER_CONFIG; + status.owner = (int16_t)lcore_id; + success = rte_atomic32_cmpset(&tim->status.u32, + prev_status.u32, + status.u32); + } + + ret_prev_status->u32 = prev_status.u32; + return 0; +} + +/* + * if timer is pending, mark timer as running + */ +static int +timer_set_running_state(struct rte_timer *tim) +{ + union rte_timer_status prev_status, status; + unsigned lcore_id = rte_lcore_id(); + int 
success = 0; + + /* wait that the timer is in correct status before update, + * and mark it as running */ + while (success == 0) { + prev_status.u32 = tim->status.u32; + + /* timer is not pending anymore */ + if (prev_status.state != RTE_TIMER_PENDING) + return -1; + + /* here, we know that timer is stopped or pending, + * mark it atomically as beeing configured */ + status.state = RTE_TIMER_RUNNING; + status.owner = (int16_t)lcore_id; + success = rte_atomic32_cmpset(&tim->status.u32, + prev_status.u32, + status.u32); + } + + return 0; +} + +/* + * add in list, lock if needed + * timer must be in config state + * timer must not be in a list + */ +static void +timer_add(struct rte_timer *tim, unsigned tim_lcore, int local_is_locked) +{ + uint64_t cur_time = rte_get_hpet_cycles(); + unsigned lcore_id = rte_lcore_id(); + struct rte_timer *t, *t_prev; + + /* if timer needs to be scheduled on another core, we need to + * lock the list; if it is on local core, we need to lock if + * we are not called from rte_timer_manage() */ + if (tim_lcore != lcore_id || !local_is_locked) + rte_spinlock_lock(&priv_timer[tim_lcore].list_lock); + + t = LIST_FIRST(&priv_timer[tim_lcore].pending); + + /* list is empty or 'tim' will expire before 't' */ + if (t == NULL || ((int64_t)(tim->expire - cur_time) < + (int64_t)(t->expire - cur_time))) { + LIST_INSERT_HEAD(&priv_timer[tim_lcore].pending, tim, next); + } + else { + t_prev = t; + + /* find an element that will expire after 'tim' */ + LIST_FOREACH(t, &priv_timer[tim_lcore].pending, next) { + if ((int64_t)(tim->expire - cur_time) < + (int64_t)(t->expire - cur_time)) { + LIST_INSERT_BEFORE(t, tim, next); + break; + } + t_prev = t; + } + + /* not found, insert at the end of the list */ + if (t == NULL) + LIST_INSERT_AFTER(t_prev, tim, next); + } + + if (tim_lcore != lcore_id || !local_is_locked) + rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock); +} + +/* + * del from list, lock if needed + * timer must be in config state + * timer must be in a list + */ +static void +timer_del(struct rte_timer *tim, unsigned prev_owner, int local_is_locked) +{ + unsigned lcore_id = rte_lcore_id(); + + /* if timer needs is pending another core, we need to lock the + * list; if it is on local core, we need to lock if we are not + * called from rte_timer_manage() */ + if (prev_owner != lcore_id || !local_is_locked) + rte_spinlock_lock(&priv_timer[prev_owner].list_lock); + + LIST_REMOVE(tim, next); + + if (prev_owner != lcore_id || !local_is_locked) + rte_spinlock_unlock(&priv_timer[prev_owner].list_lock); +} + +/* Reset and start the timer associated with the timer handle (private func) */ +static int +__rte_timer_reset(struct rte_timer *tim, uint64_t expire, + uint64_t period, unsigned tim_lcore, + rte_timer_cb_t fct, void *arg, + int local_is_locked) +{ + union rte_timer_status prev_status, status; + int ret; + unsigned lcore_id = rte_lcore_id(); + + /* round robin for tim_lcore */ + if (tim_lcore == (unsigned)LCORE_ID_ANY) { + tim_lcore = rte_get_next_lcore(priv_timer[lcore_id].prev_lcore, + 0, 1); + priv_timer[lcore_id].prev_lcore = tim_lcore; + } + + /* wait that the timer is in correct status before update, + * and mark it as beeing configured */ + ret = timer_set_config_state(tim, &prev_status); + if (ret < 0) + return -1; + + __TIMER_STAT_ADD(reset, 1); + priv_timer[lcore_id].updated = 1; + + /* remove it from list */ + if (prev_status.state == RTE_TIMER_PENDING || + prev_status.state == RTE_TIMER_RUNNING) { + timer_del(tim, prev_status.owner, local_is_locked); + 
__TIMER_STAT_ADD(pending, -1); + } + + tim->period = period; + tim->expire = expire; + tim->f = fct; + tim->arg = arg; + + __TIMER_STAT_ADD(pending, 1); + timer_add(tim, tim_lcore, local_is_locked); + + /* update state: as we are in CONFIG state, only us can modify + * the state so we don't need to use cmpset() here */ + rte_wmb(); + status.state = RTE_TIMER_PENDING; + status.owner = (int16_t)tim_lcore; + tim->status.u32 = status.u32; + + return 0; +} + +/* Reset and start the timer associated with the timer handle tim */ +int +rte_timer_reset(struct rte_timer *tim, uint64_t ticks, + enum rte_timer_type type, unsigned tim_lcore, + rte_timer_cb_t fct, void *arg) +{ + uint64_t cur_time = rte_get_hpet_cycles(); + uint64_t period; + + if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) && + !rte_lcore_is_enabled(tim_lcore))) + return -1; + + if (type == PERIODICAL) + period = ticks; + else + period = 0; + + __rte_timer_reset(tim, cur_time + ticks, period, tim_lcore, + fct, arg, 0); + + return 0; +} + +/* loop until rte_timer_reset() succeed */ +void +rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks, + enum rte_timer_type type, unsigned tim_lcore, + rte_timer_cb_t fct, void *arg) +{ + while (rte_timer_reset(tim, ticks, type, tim_lcore, + fct, arg) != 0); +} + +/* Stop the timer associated with the timer handle tim */ +int +rte_timer_stop(struct rte_timer *tim) +{ + union rte_timer_status prev_status, status; + unsigned lcore_id = rte_lcore_id(); + int ret; + + /* wait that the timer is in correct status before update, + * and mark it as beeing configured */ + ret = timer_set_config_state(tim, &prev_status); + if (ret < 0) + return -1; + + __TIMER_STAT_ADD(stop, 1); + priv_timer[lcore_id].updated = 1; + + /* remove it from list */ + if (prev_status.state == RTE_TIMER_PENDING || + prev_status.state == RTE_TIMER_RUNNING) { + timer_del(tim, prev_status.owner, 0); + __TIMER_STAT_ADD(pending, -1); + } + + /* mark timer as stopped */ + rte_wmb(); + status.state = RTE_TIMER_STOP; + status.owner = RTE_TIMER_NO_OWNER; + tim->status.u32 = status.u32; + + return 0; +} + +/* loop until rte_timer_stop() succeed */ +void +rte_timer_stop_sync(struct rte_timer *tim) +{ + while (rte_timer_stop(tim) != 0); +} + +/* Test the PENDING status of the timer handle tim */ +int +rte_timer_pending(struct rte_timer *tim) +{ + return tim->status.state == RTE_TIMER_PENDING; +} + +/* must be called periodically, run all timer that expired */ +void rte_timer_manage(void) +{ + union rte_timer_status status; + struct rte_timer *tim, *tim2; + unsigned lcore_id = rte_lcore_id(); + uint64_t cur_time = rte_get_hpet_cycles(); + int ret; + + __TIMER_STAT_ADD(manage, 1); + + /* browse ordered list, add expired timers in 'expired' list */ + rte_spinlock_lock(&priv_timer[lcore_id].list_lock); + + LIST_FOREACH_SAFE(tim, tim2, &priv_timer[lcore_id].pending, next) { + if ((int64_t)(cur_time - tim->expire) < 0) + break; + + LIST_REMOVE(tim, next); + LIST_INSERT_HEAD(&priv_timer[lcore_id].expired, tim, next); + } + + + /* for each timer of 'expired' list, check state and execute callback */ + while ((tim = LIST_FIRST(&priv_timer[lcore_id].expired)) != NULL) { + ret = timer_set_running_state(tim); + + /* remove from expired list, and add it in done list */ + LIST_REMOVE(tim, next); + LIST_INSERT_HEAD(&priv_timer[lcore_id].done, tim, next); + + /* this timer was not pending, continue */ + if (ret < 0) + continue; + + rte_spinlock_unlock(&priv_timer[lcore_id].list_lock); + + priv_timer[lcore_id].updated = 0; + + /* execute callback 
function with list unlocked */ + tim->f(tim, tim->arg); + + rte_spinlock_lock(&priv_timer[lcore_id].list_lock); + + /* the timer was stopped or reloaded by the callback + * function, we have nothing to do here */ + if (priv_timer[lcore_id].updated == 1) + continue; + + if (tim->period == 0) { + /* remove from done list and mark timer as stopped */ + LIST_REMOVE(tim, next); + __TIMER_STAT_ADD(pending, -1); + status.state = RTE_TIMER_STOP; + status.owner = RTE_TIMER_NO_OWNER; + rte_wmb(); + tim->status.u32 = status.u32; + } + else { + /* keep it in done list and mark timer as pending */ + status.state = RTE_TIMER_PENDING; + status.owner = (int16_t)lcore_id; + rte_wmb(); + tim->status.u32 = status.u32; + } + } + + /* finally, browse done list, some timer may have to be + * rescheduled automatically */ + LIST_FOREACH_SAFE(tim, tim2, &priv_timer[lcore_id].done, next) { + + /* reset may fail if timer is beeing modified, in this + * case the timer will remain in 'done' list until the + * core that is modifying it remove it */ + __rte_timer_reset(tim, cur_time + tim->period, + tim->period, lcore_id, tim->f, + tim->arg, 1); + } + + /* job finished, unlock the list lock */ + rte_spinlock_unlock(&priv_timer[lcore_id].list_lock); +} + +/* dump statistics about timers */ +void rte_timer_dump_stats(void) +{ +#ifdef RTE_LIBRTE_TIMER_DEBUG + struct rte_timer_debug_stats sum; + unsigned lcore_id; + + memset(&sum, 0, sizeof(sum)); + for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + sum.reset += priv_timer[lcore_id].stats.reset; + sum.stop += priv_timer[lcore_id].stats.stop; + sum.manage += priv_timer[lcore_id].stats.manage; + sum.pending += priv_timer[lcore_id].stats.pending; + } + printf("Timer statistics:\n"); + printf(" reset = %"PRIu64"\n", sum.reset); + printf(" stop = %"PRIu64"\n", sum.stop); + printf(" manage = %"PRIu64"\n", sum.manage); + printf(" pending = %"PRIu64"\n", sum.pending); +#else + printf("No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n"); +#endif +} diff --git a/lib/librte_timer/rte_timer.h b/lib/librte_timer/rte_timer.h new file mode 100644 index 0000000000..a44bc90d95 --- /dev/null +++ b/lib/librte_timer/rte_timer.h @@ -0,0 +1,332 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _RTE_TIMER_H_ +#define _RTE_TIMER_H_ + +/** + * @file + RTE Timer + * + * This library provides a timer service to RTE Data Plane execution + * units that allows the execution of callback functions asynchronously. + * + * - Timers can be periodic or single (one-shot). + * - The timers can be loaded from one core and executed on another. This has + * to be specified in the call to rte_timer_reset(). + * - High precision is possible. NOTE: this depends on the call frequency to + * rte_timer_manage() that check the timer expiration for the local core. + * - If not used in an application, for improved performance, it can be + * disabled at compilation time by not calling the rte_timer_manage() + * to improve performance. + * + * The timer library uses the rte_get_hpet_cycles() function that + * uses the HPET, when available, to provide a reliable time reference. [HPET + * routines are provided by EAL, which falls back to using the chip TSC (time- + * stamp counter) as fallback when HPET is not available] + * + * This library provides an interface to add, delete and restart a + * timer. The API is based on the BSD callout(9) API with a few + * differences. + * + * See the RTE architecture documentation for more information about the + * design of this library. + */ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define RTE_TIMER_STOP 0 /**< State: timer is stopped. */ +#define RTE_TIMER_PENDING 1 /**< State: timer is scheduled. */ +#define RTE_TIMER_RUNNING 2 /**< State: timer function is running. */ +#define RTE_TIMER_CONFIG 3 /**< State: timer is being configured. */ + +#define RTE_TIMER_NO_OWNER -1 /**< Timer has no owner. */ + +/** + * Timer type: Periodic or single (one-shot). + */ +enum rte_timer_type { + SINGLE, + PERIODICAL +}; + +/** + * Timer status: A union of the state (stopped, pending, running, + * config) and an owner (the id of the lcore that owns the timer). + */ +union rte_timer_status { + struct { + uint16_t state; /**< Stop, pending, running, config. */ + int16_t owner; /**< The lcore that owns the timer. */ + }; + uint32_t u32; /**< To atomic-set status + owner. */ +}; + +#ifdef RTE_LIBRTE_TIMER_DEBUG +/** + * A structure that stores the timer statistics (per-lcore). + */ +struct rte_timer_debug_stats { + uint64_t reset; /**< Number of success calls to rte_timer_reset(). */ + uint64_t stop; /**< Number of success calls to rte_timer_stop(). */ + uint64_t manage; /**< Number of calls to rte_timer_manage(). */ + uint64_t pending; /**< Number of pending/running timers. */ +}; +#endif + +struct rte_timer; + +/** + * Callback function type for timer expiry. + */ +typedef void (rte_timer_cb_t)(struct rte_timer *, void *); + +/** + * A structure describing a timer in RTE. + */ +struct rte_timer +{ + LIST_ENTRY(rte_timer) next; /**< Next and prev in list. */ + volatile union rte_timer_status status; /**< Status of timer. 
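+ (state and owner are packed into one 32-bit word so both can be updated atomically with a single rte_atomic32_cmpset()).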
*/ + uint64_t period; /**< Period of timer (0 if not periodic). */ + uint64_t expire; /**< Time when timer expire. */ + rte_timer_cb_t *f; /**< Callback function. */ + void *arg; /**< Argument to callback function. */ +}; + + +#ifdef __cplusplus +/** + * A C++ static initializer for a timer structure. + */ +#define RTE_TIMER_INITIALIZER { \ + {0, 0}, \ + {{RTE_TIMER_STOP, RTE_TIMER_NO_OWNER}}, \ + 0, \ + 0, \ + NULL, \ + NULL, \ + } +#else +/** + * A static initializer for a timer structure. + */ +#define RTE_TIMER_INITIALIZER { \ + .status = {{ \ + .state = RTE_TIMER_STOP, \ + .owner = RTE_TIMER_NO_OWNER, \ + }}, \ + } +#endif + +/** + * Initialize the timer library. + * + * Initializes internal variables (list, locks and so on) for the RTE + * timer library. + */ +void rte_timer_subsystem_init(void); + +/** + * Initialize a timer handle. + * + * The rte_timer_init() function initializes the timer handle *tim* + * for use. No operations can be performed on a timer before it is + * initialized. + * + * @param tim + * The timer to initialize. + */ +void rte_timer_init(struct rte_timer *tim); + +/** + * Reset and start the timer associated with the timer handle. + * + * The rte_timer_reset() function resets and starts the timer + * associated with the timer handle *tim*. When the timer expires after + * *ticks* HPET cycles, the function specified by *fct* will be called + * with the argument *arg* on core *tim_lcore*. + * + * If the timer associated with the timer handle is already running + * (in the RUNNING state), the function will fail. The user has to check + * the return value of the function to see if there is a chance that the + * timer is in the RUNNING state. + * + * If the timer is being configured on another core (the CONFIG state), + * it will also fail. + * + * If the timer is pending or stopped, it will be rescheduled with the + * new parameters. + * + * @param tim + * The timer handle. + * @param ticks + * The number of cycles (see rte_get_hpet_hz()) before the callback + * function is called. + * @param type + * The type can be either: + * - PERIODICAL: The timer is automatically reloaded after execution + * (returns to the PENDING state) + * - SINGLE: The timer is one-shot, that is, the timer goes to a + * STOPPED state after execution. + * @param tim_lcore + * The ID of the lcore where the timer callback function has to be + * executed. If tim_lcore is LCORE_ID_ANY, the timer library will + * launch it on a different core for each call (round-robin). + * @param fct + * The callback function of the timer. + * @param arg + * The user argument of the callback function. + * @return + * - 0: Success; the timer is scheduled. + * - (-1): Timer is in the RUNNING or CONFIG state. + */ +int rte_timer_reset(struct rte_timer *tim, uint64_t ticks, + enum rte_timer_type type, unsigned tim_lcore, + rte_timer_cb_t fct, void *arg); + + +/** + * Loop until rte_timer_reset() succeeds. + * + * Reset and start the timer associated with the timer handle. Always + * succeed. See rte_timer_reset() for details. + * + * @param tim + * The timer handle. + * @param ticks + * The number of cycles (see rte_get_hpet_hz()) before the callback + * function is called. + * @param type + * The type can be either: + * - PERIODICAL: The timer is automatically reloaded after execution + * (returns to the PENDING state) + * - SINGLE: The timer is one-shot, that is, the timer goes to a + * STOPPED state after execution. 
+ * @param tim_lcore + * The ID of the lcore where the timer callback function has to be + * executed. If tim_lcore is LCORE_ID_ANY, the timer library will + * launch it on a different core for each call (round-robin). + * @param fct + * The callback function of the timer. + * @param arg + * The user argument of the callback function. + */ +void +rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks, + enum rte_timer_type type, unsigned tim_lcore, + rte_timer_cb_t fct, void *arg); + +/** + * Stop a timer. + * + * The rte_timer_stop() function stops the timer associated with the + * timer handle *tim*. It may fail if the timer is currently running or + * being configured. + * + * If the timer is pending or stopped (for instance, already expired), + * the function will succeed. The timer handle tim must have been + * initialized using rte_timer_init(), otherwise, undefined behavior + * will occur. + * + * This function can be called safely from a timer callback. If it + * succeeds, the timer is not referenced anymore by the timer library + * and the timer structure can be freed (even in the callback + * function). + * + * @param tim + * The timer handle. + * @return + * - 0: Success; the timer is stopped. + * - (-1): The timer is in the RUNNING or CONFIG state. + */ +int rte_timer_stop(struct rte_timer *tim); + + +/** + * Loop until rte_timer_stop() succeeds. + * + * After a call to this function, the timer identified by *tim* is + * stopped. See rte_timer_stop() for details. + * + * @param tim + * The timer handle. + */ +void rte_timer_stop_sync(struct rte_timer *tim); + +/** + * Test if a timer is pending. + * + * The rte_timer_pending() function tests the PENDING status + * of the timer handle *tim*. A PENDING timer is one that has been + * scheduled and whose function has not yet been called. + * + * @param tim + * The timer handle. + * @return + * - 0: The timer is not pending. + * - 1: The timer is pending. + */ +int rte_timer_pending(struct rte_timer *tim); + +/** + * Manage the timer list and execute callback functions. + * + * This function must be called periodically from all cores + * main_loop(). It browses the list of pending timers and runs all + * timers that are expired. + * + * The precision of the timer depends on the call frequency of this + * function. However, the more often the function is called, the more + * CPU resources it will use. + */ +void rte_timer_manage(void); + +/** + * Dump statistics about timers. + */ +void rte_timer_dump_stats(void); + +#ifdef __cplusplus +} +#endif + +#endif /* _RTE_TIMER_H_ */ diff --git a/mk/arch/i686/rte.vars.mk b/mk/arch/i686/rte.vars.mk new file mode 100644 index 0000000000..6f8e474d2f --- /dev/null +++ b/mk/arch/i686/rte.vars.mk @@ -0,0 +1,59 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
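To tie the timer API above together before the build-system files that follow, here is a minimal, hypothetical sketch of a periodic timer; the callback, the one-second period and the lcore choice are illustrative only and not part of the DPDK sources.

    #include <stdio.h>
    #include <rte_timer.h>
    #include <rte_cycles.h>
    #include <rte_lcore.h>

    static struct rte_timer hello_tim;

    /* rte_timer_cb_t: runs on the lcore the timer was scheduled on */
    static void hello_cb(struct rte_timer *tim, void *arg)
    {
            printf("tick from lcore %u\n", rte_lcore_id());
            (void)tim;
            (void)arg;
    }

    static void start_hello_timer(void)
    {
            rte_timer_subsystem_init();   /* once, before any other timer call */
            rte_timer_init(&hello_tim);

            /* fire about once per second; the library picks an enabled lcore */
            rte_timer_reset_sync(&hello_tim, rte_get_hpet_hz(), PERIODICAL,
                                 LCORE_ID_ANY, hello_cb, NULL);
    }

Every lcore that may own timers has to call rte_timer_manage() periodically from its main loop; the callback precision is bounded by how often that call is made.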
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# arch: +# +# - define ARCH variable (overriden by cmdline or by previous +# optional define in machine .mk) +# - define CROSS variable (overriden by cmdline or previous define +# in machine .mk) +# - define CPU_CFLAGS variable (overriden by cmdline or previous +# define in machine .mk) +# - define CPU_LDFLAGS variable (overriden by cmdline or previous +# define in machine .mk) +# - define CPU_ASFLAGS variable (overriden by cmdline or previous +# define in machine .mk) +# - may override any previously defined variable +# +# examples for CONFIG_RTE_ARCH: i686, x86_64, x86_64_32 +# + +ARCH ?= i386 +CROSS ?= + +CPU_CFLAGS ?= -m32 +CPU_LDFLAGS ?= -m elf_i386 +CPU_ASFLAGS ?= -felf + +export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS diff --git a/mk/arch/x86_64/rte.vars.mk b/mk/arch/x86_64/rte.vars.mk new file mode 100644 index 0000000000..7d5beff44d --- /dev/null +++ b/mk/arch/x86_64/rte.vars.mk @@ -0,0 +1,59 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +# +# arch: +# +# - define ARCH variable (overriden by cmdline or by previous +# optional define in machine .mk) +# - define CROSS variable (overriden by cmdline or previous define +# in machine .mk) +# - define CPU_CFLAGS variable (overriden by cmdline or previous +# define in machine .mk) +# - define CPU_LDFLAGS variable (overriden by cmdline or previous +# define in machine .mk) +# - define CPU_ASFLAGS variable (overriden by cmdline or previous +# define in machine .mk) +# - may override any previously defined variable +# +# examples for CONFIG_RTE_ARCH: i686, x86_64, x86_64_32 +# + +ARCH ?= x86_64 +CROSS ?= + +CPU_CFLAGS ?= -m64 +CPU_LDFLAGS ?= -melf_x86_64 +CPU_ASFLAGS ?= -felf64 + +export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS diff --git a/mk/exec-env/linuxapp/rte.app.mk b/mk/exec-env/linuxapp/rte.app.mk new file mode 100644 index 0000000000..2a3002dd05 --- /dev/null +++ b/mk/exec-env/linuxapp/rte.app.mk @@ -0,0 +1,38 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +exec-env-appinstall: + @true + +exec-env-appclean: + @true diff --git a/mk/exec-env/linuxapp/rte.vars.mk b/mk/exec-env/linuxapp/rte.vars.mk new file mode 100644 index 0000000000..e0ed29819f --- /dev/null +++ b/mk/exec-env/linuxapp/rte.vars.mk @@ -0,0 +1,52 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# exec-env: +# +# - define EXECENV_CFLAGS variable (overriden by cmdline) +# - define EXECENV_LDFLAGS variable (overriden by cmdline) +# - define EXECENV_ASFLAGS variable (overriden by cmdline) +# - may override any previously defined variable +# +# examples for RTE_EXEC_ENV: linuxapp, baremetal +# + +EXECENV_CFLAGS = -pthread +EXECENV_LDFLAGS = +EXECENV_ASFLAGS = + +# force applications to link with gcc/icc instead of using ld +LINK_USING_CC := 1 + +export EXECENV_CFLAGS EXECENV_LDFLAGS EXECENV_ASFLAGS diff --git a/mk/internal/rte.build-post.mk b/mk/internal/rte.build-post.mk new file mode 100644 index 0000000000..fa7dd1bcbf --- /dev/null +++ b/mk/internal/rte.build-post.mk @@ -0,0 +1,64 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +# build helper .mk + +# fast way, no need to do prebuild and postbuild +ifeq ($(PREBUILD)$(POSTBUILD),) + +_postbuild: $(_BUILD) + @touch _postbuild + +else # slower way + +_prebuild: $(PREBUILD) + @touch _prebuild + +ifneq ($(_BUILD),) +$(_BUILD): _prebuild +else +_BUILD = _prebuild +endif + +_build: $(_BUILD) + @touch _build + +ifneq ($(POSTBUILD),) +$(POSTBUILD): _build +else +POSTBUILD = _build +endif + +_postbuild: $(POSTBUILD) + @touch _postbuild +endif \ No newline at end of file diff --git a/mk/internal/rte.build-pre.mk b/mk/internal/rte.build-pre.mk new file mode 100644 index 0000000000..d47208293d --- /dev/null +++ b/mk/internal/rte.build-pre.mk @@ -0,0 +1,34 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +_BUILD_TARGETS := _prebuild _build _postbuild diff --git a/mk/internal/rte.clean-post.mk b/mk/internal/rte.clean-post.mk new file mode 100644 index 0000000000..6c859a61fd --- /dev/null +++ b/mk/internal/rte.clean-post.mk @@ -0,0 +1,64 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# clean helper .mk + +# fast way, no need to do preclean and postclean +ifeq ($(PRECLEAN)$(POSTCLEAN),) + +_postclean: $(_CLEAN) + @touch _postclean + +else # slower way + +_preclean: $(PRECLEAN) + @touch _preclean + +ifneq ($(_CLEAN),) +$(_CLEAN): _preclean +else +_CLEAN = _preclean +endif + +_clean: $(_CLEAN) + @touch _clean + +ifneq ($(POSTCLEAN),) +$(POSTCLEAN): _clean +else +POSTCLEAN = _clean +endif + +_postclean: $(POSTCLEAN) + @touch _postclean +endif diff --git a/mk/internal/rte.clean-pre.mk b/mk/internal/rte.clean-pre.mk new file mode 100644 index 0000000000..aaec294a1a --- /dev/null +++ b/mk/internal/rte.clean-pre.mk @@ -0,0 +1,34 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +_CLEAN_TARGETS := _preclean _clean _postclean diff --git a/mk/internal/rte.compile-post.mk b/mk/internal/rte.compile-post.mk new file mode 100644 index 0000000000..f868e92b2d --- /dev/null +++ b/mk/internal/rte.compile-post.mk @@ -0,0 +1,35 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# no rule no build these files +$(DEPS-y) $(CMDS-y): diff --git a/mk/internal/rte.compile-pre.mk b/mk/internal/rte.compile-pre.mk new file mode 100644 index 0000000000..8ef975065d --- /dev/null +++ b/mk/internal/rte.compile-pre.mk @@ -0,0 +1,178 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +# +# Common to rte.lib.mk, rte.app.mk, rte.obj.mk +# + +SRCS-all := $(SRCS-y) $(SRCS-n) $(SRCS-) + +# convert source to obj file +src2obj = $(strip $(patsubst %.c,%.o,\ + $(patsubst %.S,%_s.o,$(1)))) + +# add a dot in front of the file name +dotfile = $(strip $(foreach f,$(1),\ + $(join $(dir $f),.$(notdir $f)))) + +# convert source/obj files into dot-dep filename (does not +# include .S files) +src2dep = $(strip $(call dotfile,$(patsubst %.c,%.o.d, \ + $(patsubst %.S,,$(1))))) +obj2dep = $(strip $(call dotfile,$(patsubst %.o,%.o.d,$(1)))) + +# convert source/obj files into dot-cmd filename +src2cmd = $(strip $(call dotfile,$(patsubst %.c,%.o.cmd, \ + $(patsubst %.S,%_s.o.cmd,$(1))))) +obj2cmd = $(strip $(call dotfile,$(patsubst %.o,%.o.cmd,$(1)))) + +OBJS-y := $(call src2obj,$(SRCS-y)) +OBJS-n := $(call src2obj,$(SRCS-n)) +OBJS- := $(call src2obj,$(SRCS-)) +OBJS-all := $(filter-out $(SRCS-all),$(OBJS-y) $(OBJS-n) $(OBJS-)) + +DEPS-y := $(call src2dep,$(SRCS-y)) +DEPS-n := $(call src2dep,$(SRCS-n)) +DEPS- := $(call src2dep,$(SRCS-)) +DEPS-all := $(DEPS-y) $(DEPS-n) $(DEPS-) +DEPSTMP-all := $(DEPS-all:%.d=%.d.tmp) + +CMDS-y := $(call src2cmd,$(SRCS-y)) +CMDS-n := $(call src2cmd,$(SRCS-n)) +CMDS- := $(call src2cmd,$(SRCS-)) +CMDS-all := $(CMDS-y) $(CMDS-n) $(CMDS-) + +-include $(DEPS-y) $(CMDS-y) + +# command to compile a .c file to generate an object +ifeq ($(USE_HOST),1) +C_TO_O = $(HOSTCC) -Wp,-MD,$(call obj2dep,$(@)).tmp $(HOST_CFLAGS) \ + $(CFLAGS_$(@)) $(HOST_EXTRA_CFLAGS) -o $@ -c $< +C_TO_O_STR = $(subst ','\'',$(C_TO_O)) #'# fix syntax highlight +C_TO_O_DISP = $(if $(V),"$(C_TO_O_STR)"," HOSTCC $(@)") +else +C_TO_O = $(CC) -Wp,-MD,$(call obj2dep,$(@)).tmp $(CFLAGS) \ + $(CFLAGS_$(@)) $(EXTRA_CFLAGS) -o $@ -c $< +C_TO_O_STR = $(subst ','\'',$(C_TO_O)) #'# fix syntax highlight +C_TO_O_DISP = $(if $(V),"$(C_TO_O_STR)"," CC $(@)") +endif +C_TO_O_CMD = "cmd_$@ = $(C_TO_O_STR)" +C_TO_O_DO = @set -e; \ + echo $(C_TO_O_DISP); \ + $(C_TO_O) && \ + echo $(C_TO_O_CMD) > $(call obj2cmd,$(@)) && \ + sed 's,'$@':,dep_'$@' =,' $(call obj2dep,$(@)).tmp > $(call obj2dep,$(@)) && \ + rm -f $(call obj2dep,$(@)).tmp + +# return an empty string if string are equal +compare = $(strip $(subst $(1),,$(2)) $(subst $(2),,$(1))) + +# return a non-empty string if the dst file does not exist +file_missing = $(call compare,$(wildcard $@),$@) + +# return a non-empty string if cmdline changed +cmdline_changed = $(call compare,$(cmd_$@),$(1)) + +# return a non-empty string if a dependency file does not exist +depfile_missing = $(call compare,$(wildcard $(dep_$@)),$(dep_$@)) + +# return an empty string if no prereq is newer than target +# - $^ -> names of all the prerequisites +# - $(wildcard $^) -> every existing prereq +# - $(filter-out $(wildcard $^),$^) -> every prereq that don't +# exist (filter-out removes existing ones from the list) +# - $? -> names of all the prerequisites newer than target +depfile_newer = $(strip $(filter-out FORCE,$? 
\ + $(filter-out $(wildcard $^),$^))) + +# return 1 if parameter is a non-empty string, else 0 +boolean = $(if $1,1,0) + +# +# Compile .c file if needed +# Note: dep_$$@ is from the .d file and DEP_$$@ can be specified by +# user (by default it is empty) +# +.SECONDEXPANSION: +%.o: %.c $$(wildcard $$(dep_$$@)) $$(DEP_$$(@)) FORCE + @[ -d $(dir $@) ] || mkdir -p $(dir $@) + $(if $(D),\ + @echo -n "$< -> $@ " ; \ + echo -n "file_missing=$(call boolean,$(file_missing)) " ; \ + echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(C_TO_O_STR))) " ; \ + echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \ + echo "depfile_newer=$(call boolean,$(depfile_newer))") + $(if $(or \ + $(file_missing),\ + $(call cmdline_changed,$(C_TO_O_STR)),\ + $(depfile_missing),\ + $(depfile_newer)),\ + $(C_TO_O_DO)) + +# command to assemble a .S file to generate an object +ifeq ($(USE_HOST),1) +S_TO_O = $(CPP) $(HOST_CPPFLAGS) $($(@)_CPPFLAGS) $(HOST_EXTRA_CPPFLAGS) $< $(@).tmp && \ + $(HOSTAS) $(HOST_ASFLAGS) $($(@)_ASFLAGS) $(HOST_EXTRA_ASFLAGS) -o $@ $(@).tmp +S_TO_O_STR = $(subst ','\'',$(S_TO_O)) #'# fix syntax highlight +S_TO_O_DISP = $(if $(V),"$(S_TO_O_STR)"," HOSTAS $(@)") +else +S_TO_O = $(CPP) $(CPPFLAGS) $($(@)_CPPFLAGS) $(EXTRA_CPPFLAGS) $< -o $(@).tmp && \ + $(AS) $(ASFLAGS) $($(@)_ASFLAGS) $(EXTRA_ASFLAGS) -o $@ $(@).tmp +S_TO_O_STR = $(subst ','\'',$(S_TO_O)) #'# fix syntax highlight +S_TO_O_DISP = $(if $(V),"$(S_TO_O_STR)"," AS $(@)") +endif + +S_TO_O_CMD = "cmd_$@ = $(S_TO_O_STR)" +S_TO_O_DO = @set -e; \ + echo $(S_TO_O_DISP); \ + $(S_TO_O) && \ + echo $(S_TO_O_CMD) > $(call obj2cmd,$(@)) + +# +# Compile .S file if needed +# Note: DEP_$$@ can be specified by user (by default it is empty) +# +%_s.o: %.S $$(DEP_$$@) FORCE + @[ ! -d $(dir $@) ] || mkdir -p $(dir $@) + $(if $(D),\ + @echo -n "$< -> $@ " ; \ + echo -n "file_missing=$(call boolean,$(file_missing)) " ; \ + echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(S_TO_O_STR))) " ; \ + echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \ + echo "depfile_newer=$(call boolean,$(depfile_newer)) ") + $(if $(or \ + $(file_missing),\ + $(call cmdline_changed,$(S_TO_O_STR)),\ + $(depfile_missing),\ + $(depfile_newer)),\ + $(S_TO_O_DO)) diff --git a/mk/internal/rte.depdirs-post.mk b/mk/internal/rte.depdirs-post.mk new file mode 100644 index 0000000000..2a8e8fe7af --- /dev/null +++ b/mk/internal/rte.depdirs-post.mk @@ -0,0 +1,44 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
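The rebuild test used by the %.o rule above looks at more than timestamps: C_TO_O_DO records the exact compiler command in a hidden .<obj>.cmd file (as a cmd_<obj> variable) and rewrites the compiler-generated dependency file into a dep_<obj> variable, and on the next run the rule recompiles when the object is missing, the saved command line differs from the current one (cmdline_changed, built on the symmetric-subst compare helper), the dependency file is gone, or a real prerequisite is newer or has vanished (depfile_newer, with the FORCE pseudo-target filtered out). A cut-down sketch of just the command-line check; the names hello.c and .hello.o.cmd are invented for the example, and recipe lines are tab-indented:

# cmdline.mk -- rebuild hello.o whenever the compile command changes
CC     ?= cc
CFLAGS ?= -O2
CMD     = $(CC) $(CFLAGS) -c -o hello.o hello.c

# empty result only when both arguments are the same string
compare = $(strip $(subst $(1),,$(2)) $(subst $(2),,$(1)))

# previous command line, if any, defines the variable cmd_hello.o
-include .hello.o.cmd

# compile, then store the command line that was used
DO_CC = @echo "  CC hello.o"; $(CMD); \
	echo "cmd_hello.o = $(CMD)" > .hello.o.cmd

# FORCE makes the recipe run every time; the $(if ...) decides whether
# anything actually needs to be done
hello.o: hello.c FORCE
	$(if $(or $(call compare,$(wildcard $@),$@),\
		$(call compare,$(cmd_hello.o),$(CMD)),\
		$(filter-out FORCE,$?)),$(DO_CC))

.PHONY: FORCE
FORCE:

Changing nothing but the flags, e.g. make -f cmdline.mk CFLAGS=-O0 hello.o, is enough to trigger a recompile, because the stored cmd_hello.o no longer matches the current command.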
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +.PHONY: depdirs +depdirs: + @for d in $(DEPDIRS-y); do \ + $(RTE_SDK)/scripts/depdirs-rule.sh $(S) $$d ; \ + done + +.PHONY: depgraph +depgraph: + @for d in $(DEPDIRS-y); do \ + echo " \"$(S)\" -> \"$$d\"" ; \ + done diff --git a/mk/internal/rte.depdirs-pre.mk b/mk/internal/rte.depdirs-pre.mk new file mode 100644 index 0000000000..8b4fb92675 --- /dev/null +++ b/mk/internal/rte.depdirs-pre.mk @@ -0,0 +1,34 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# nothing diff --git a/mk/internal/rte.exthelp-post.mk b/mk/internal/rte.exthelp-post.mk new file mode 100644 index 0000000000..b27ba27d11 --- /dev/null +++ b/mk/internal/rte.exthelp-post.mk @@ -0,0 +1,41 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +HELP_FILE = $(RTE_SDK)/doc/rst/developpers_reference/app_mkhelp.rst + +help: + @if [ ! -f $(HELP_FILE) ]; then \ + echo "Cannot find RTE SDK documentation" ; \ + exit 0 ; \ + fi + @sed -e '1,/.*OF THE POSSIBILITY OF SUCH DAMAGE.*/ d' $(HELP_FILE) diff --git a/mk/internal/rte.install-post.mk b/mk/internal/rte.install-post.mk new file mode 100644 index 0000000000..1e84a10633 --- /dev/null +++ b/mk/internal/rte.install-post.mk @@ -0,0 +1,101 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# install helper .mk + +# +# generate rules to install files in RTE_OUTPUT. 
+# +# arg1: relative install dir in RTE_OUTPUT +# arg2: relative file name in a source dir (VPATH) +# +define install_rule +$(addprefix $(RTE_OUTPUT)/$(1)/,$(notdir $(2))): $(2) + @echo " INSTALL-FILE $(addprefix $(1)/,$(notdir $(2)))" + @[ -d $(RTE_OUTPUT)/$(1) ] || mkdir -p $(RTE_OUTPUT)/$(1) + @cp -rf $$(<) $(RTE_OUTPUT)/$(1) +endef + +$(foreach dir,$(INSTALL-DIRS-y),\ + $(foreach file,$(INSTALL-y-$(dir)),\ + $(eval $(call install_rule,$(dir),$(file))))) + + +# +# generate rules to install symbolic links of files in RTE_OUTPUT. +# +# arg1: relative install dir in RTE_OUTPUT +# arg2: relative file name in a source dir (VPATH) +# +define symlink_rule +$(addprefix $(RTE_OUTPUT)/$(1)/,$(notdir $(2))): $(2) + @echo " SYMLINK-FILE $(addprefix $(1)/,$(notdir $(2)))" + @[ -d $(RTE_OUTPUT)/$(1) ] || mkdir -p $(RTE_OUTPUT)/$(1) + $(Q)ln -nsf `$(RTE_SDK)/scripts/relpath.sh $$(<) $(RTE_OUTPUT)/$(1)` \ + $(RTE_OUTPUT)/$(1) +endef + +$(foreach dir,$(SYMLINK-DIRS-y),\ + $(foreach file,$(SYMLINK-y-$(dir)),\ + $(eval $(call symlink_rule,$(dir),$(file))))) + + +# fast way, no need to do preinstall and postinstall +ifeq ($(PREINSTALL)$(POSTINSTALL),) + +_postinstall: $(_INSTALL) + @touch _postinstall + +else # slower way + +_preinstall: $(PREINSTALL) + @touch _preinstall + +ifneq ($(_INSTALL),) +$(_INSTALL): _preinstall +else +_INSTALL = _preinstall +endif + +_install: $(_INSTALL) + @touch _install + +ifneq ($(POSTINSTALL),) +$(POSTINSTALL): _install +else +POSTINSTALL = _install +endif + +_postinstall: $(POSTINSTALL) + @touch _postinstall +endif diff --git a/mk/internal/rte.install-pre.mk b/mk/internal/rte.install-pre.mk new file mode 100644 index 0000000000..c8a9b3026b --- /dev/null +++ b/mk/internal/rte.install-pre.mk @@ -0,0 +1,62 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
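The install helper above does not write rules by hand: install_rule and symlink_rule are canned rule templates, and the nested foreach/eval stamps one real rule out of the template for every (directory, file) pair found in the INSTALL-y-* and SYMLINK-y-* variables. The define + call + eval idiom in isolation, assuming README and LICENSE exist in the current directory (DESTDIR and copy_rule are names made up for the sketch):

# gen-rules.mk -- generate one copy rule per file, as install_rule does
DESTDIR := out
FILES   := README LICENSE

# $(1) = destination dir, $(2) = source file
define copy_rule
$(1)/$(notdir $(2)): $(2)
	@echo "  INSTALL-FILE $$(<)"
	@mkdir -p $(1)
	@cp -f $$(<) $(1)
endef

# instantiate the template once per file; eval parses the result as
# ordinary makefile text, so real rules exist before make starts building
$(foreach f,$(FILES),$(eval $(call copy_rule,$(DESTDIR),$(f))))

install: $(addprefix $(DESTDIR)/,$(FILES))
.PHONY: install

Writing the automatic variable as $$(<), as install_rule also does, keeps it from being expanded while the template is instantiated and leaves it for the recipe to expand at run time.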
+# +# version: DPDK.L.1.2.3-3 + +# +# get all variables starting with "INSTALL-y-", and extract the +# installation dir and path +# +INSTALL-y := $(filter INSTALL-y-%,$(.VARIABLES)) +INSTALL-n := $(filter INSTALL-n-%,$(.VARIABLES)) +INSTALL- := $(filter INSTALL--%,$(.VARIABLES)) +INSTALL-DIRS-y := $(patsubst INSTALL-y-%,%,$(INSTALL-y)) +INSTALL-FILES-y := $(foreach i,$(INSTALL-DIRS-y),\ + $(addprefix $(RTE_OUTPUT)/$(i)/,$(notdir $(INSTALL-y-$(i))))) +INSTALL-FILES-all := $(foreach i,$(INSTALL-DIRS-y) $(INSTALL-DIRS-n) $(INSTALL-DIRS-),\ + $(addprefix $(RTE_OUTPUT)/$(i)/,$(notdir $(INSTALL-y-$(i))))) + +_INSTALL_TARGETS := _preinstall _install _postinstall + +# +# get all variables starting with "SYMLINK-y-", and extract the +# installation dir and path +# +SYMLINK-y := $(filter SYMLINK-y-%,$(.VARIABLES)) +SYMLINK-n := $(filter SYMLINK-n-%,$(.VARIABLES)) +SYMLINK- := $(filter SYMLINK--%,$(.VARIABLES)) +SYMLINK-DIRS-y := $(patsubst SYMLINK-y-%,%,$(SYMLINK-y)) +SYMLINK-FILES-y := $(foreach i,$(SYMLINK-DIRS-y),\ + $(addprefix $(RTE_OUTPUT)/$(i)/,$(notdir $(SYMLINK-y-$(i))))) +SYMLINK-FILES-all := $(foreach i,$(SYMLINK-DIRS-y) $(SYMLINK-DIRS-n) $(SYMLINK-DIRS-),\ + $(addprefix $(RTE_OUTPUT)/$(i)/,$(notdir $(SYMLINK-y-$(i))))) + +_SYMLINK_TARGETS := _presymlink _symlink _postsymlink diff --git a/mk/machine/atm/rte.vars.mk b/mk/machine/atm/rte.vars.mk new file mode 100644 index 0000000000..e49e48bd08 --- /dev/null +++ b/mk/machine/atm/rte.vars.mk @@ -0,0 +1,61 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
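rte.install-pre.mk discovers what to install by inspecting make's own symbol table: $(.VARIABLES) lists every defined variable name, $(filter INSTALL-y-%,...) keeps the enabled per-directory lists, and patsubst recovers the destination directory from the variable name itself. The same discovery step on its own, with hypothetical file lists:

# discover.mk -- find all INSTALL-y-<dir> variables and list their files
INSTALL-y-app := dump_cfg testpmd
INSTALL-y-lib := librte_eal.a
# disabled entries use a different prefix and are filtered out below
INSTALL-n-doc := draft.rst

INSTALL-VARS := $(filter INSTALL-y-%,$(.VARIABLES))
INSTALL-DIRS := $(patsubst INSTALL-y-%,%,$(INSTALL-VARS))

show:
	@$(foreach d,$(INSTALL-DIRS),echo "$(d): $(INSTALL-y-$(d))";) true
.PHONY: show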
+# +# version: DPDK.L.1.2.3-3 + +# +# machine: +# +# - can define ARCH variable (overriden by cmdline value) +# - can define CROSS variable (overriden by cmdline value) +# - define MACHINE_CFLAGS variable (overriden by cmdline value) +# - define MACHINE_LDFLAGS variable (overriden by cmdline value) +# - define MACHINE_ASFLAGS variable (overriden by cmdline value) +# - can define CPU_CFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_LDFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_ASFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - may override any previously defined variable +# + +# ARCH = +# CROSS = +# MACHINE_CFLAGS = +# MACHINE_LDFLAGS = +# MACHINE_ASFLAGS = +# CPU_CFLAGS = +# CPU_LDFLAGS = +# CPU_ASFLAGS = + +MACHINE_CFLAGS = -march=atom +CPUFLAGS = SSE SSE2 SSE3 SSSE3 diff --git a/mk/machine/default/rte.vars.mk b/mk/machine/default/rte.vars.mk new file mode 100644 index 0000000000..35d9d4cb0a --- /dev/null +++ b/mk/machine/default/rte.vars.mk @@ -0,0 +1,61 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# machine: +# +# - can define ARCH variable (overriden by cmdline value) +# - can define CROSS variable (overriden by cmdline value) +# - define MACHINE_CFLAGS variable (overriden by cmdline value) +# - define MACHINE_LDFLAGS variable (overriden by cmdline value) +# - define MACHINE_ASFLAGS variable (overriden by cmdline value) +# - can define CPU_CFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_LDFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_ASFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. 
+# - may override any previously defined variable +# + +# ARCH = +# CROSS = +# MACHINE_CFLAGS = +# MACHINE_LDFLAGS = +# MACHINE_ASFLAGS = +# CPU_CFLAGS = +# CPU_LDFLAGS = +# CPU_ASFLAGS = + +MACHINE_CFLAGS += -march=core2 +CPUFLAGS = SSE SSE2 SSE3 diff --git a/mk/machine/ivb/rte.vars.mk b/mk/machine/ivb/rte.vars.mk new file mode 100644 index 0000000000..cc5a3bdf26 --- /dev/null +++ b/mk/machine/ivb/rte.vars.mk @@ -0,0 +1,61 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# machine: +# +# - can define ARCH variable (overriden by cmdline value) +# - can define CROSS variable (overriden by cmdline value) +# - define MACHINE_CFLAGS variable (overriden by cmdline value) +# - define MACHINE_LDFLAGS variable (overriden by cmdline value) +# - define MACHINE_ASFLAGS variable (overriden by cmdline value) +# - can define CPU_CFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_LDFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_ASFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - may override any previously defined variable +# + +# ARCH = +# CROSS = +# MACHINE_CFLAGS = +# MACHINE_LDFLAGS = +# MACHINE_ASFLAGS = +# CPU_CFLAGS = +# CPU_LDFLAGS = +# CPU_ASFLAGS = + +MACHINE_CFLAGS = -march=core-avx-i +CPUFLAGS = SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 AES PCLMULQDQ AVX RDRAND FSGSBASE F16C diff --git a/mk/machine/native/rte.vars.mk b/mk/machine/native/rte.vars.mk new file mode 100644 index 0000000000..5f4c7dfb69 --- /dev/null +++ b/mk/machine/native/rte.vars.mk @@ -0,0 +1,111 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. 
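Each machine file reduces to two assignments: MACHINE_CFLAGS picks the -march the compiler should target, and CPUFLAGS lists the instruction-set extensions the rest of the build may assume. The repeated "(overriden by cmdline value)" remarks are plain GNU make behaviour, since a variable given on the make command line beats an ordinary assignment inside a makefile. A two-line demonstration (machine.mk is a stand-in, not an SDK file):

# machine.mk
MACHINE_CFLAGS = -march=core2

show:
	@echo "MACHINE_CFLAGS = $(MACHINE_CFLAGS)"
.PHONY: show

make -f machine.mk show prints -march=core2, while make -f machine.mk show MACHINE_CFLAGS=-march=native prints the command-line value; only an override directive inside the makefile could win it back.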
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# machine: +# +# - can define ARCH variable (overriden by cmdline value) +# - can define CROSS variable (overriden by cmdline value) +# - define MACHINE_CFLAGS variable (overriden by cmdline value) +# - define MACHINE_LDFLAGS variable (overriden by cmdline value) +# - define MACHINE_ASFLAGS variable (overriden by cmdline value) +# - can define CPU_CFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_LDFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_ASFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. 
+# - may override any previously defined variable +# + +# ARCH = +# CROSS = +# MACHINE_CFLAGS = +# MACHINE_LDFLAGS = +# MACHINE_ASFLAGS = +# CPU_CFLAGS = +# CPU_LDFLAGS = +# CPU_ASFLAGS = + +MACHINE_CFLAGS = -march=native +AUTO_CPUFLAGS = $(shell cat /proc/cpuinfo | grep flags -m 1) + +# adding flags to CPUFLAGS + +ifneq ($(filter $(AUTO_CPUFLAGS),sse),) + CPUFLAGS += SSE +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),sse2),) + CPUFLAGS += SSE2 +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),sse3),) + CPUFLAGS += SSE3 +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),ssse3),) + CPUFLAGS += SSSE3 +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),sse4_1),) + CPUFLAGS += SSE4_1 +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),sse4_2),) + CPUFLAGS += SSE4_2 +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),aes),) + CPUFLAGS += AES +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),pclmulqdq),) + CPUFLAGS += PCLMULQDQ +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),avx),) + CPUFLAGS += AVX +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),rdrnd),) + CPUFLAGS += RDRAND +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),fsgsbase),) + CPUFLAGS += FSGSBASE +endif + +ifneq ($(filter $(AUTO_CPUFLAGS),f16c),) + CPUFLAGS += F16C +endif \ No newline at end of file diff --git a/mk/machine/nhm/rte.vars.mk b/mk/machine/nhm/rte.vars.mk new file mode 100644 index 0000000000..9291d28b28 --- /dev/null +++ b/mk/machine/nhm/rte.vars.mk @@ -0,0 +1,61 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# machine: +# +# - can define ARCH variable (overriden by cmdline value) +# - can define CROSS variable (overriden by cmdline value) +# - define MACHINE_CFLAGS variable (overriden by cmdline value) +# - define MACHINE_LDFLAGS variable (overriden by cmdline value) +# - define MACHINE_ASFLAGS variable (overriden by cmdline value) +# - can define CPU_CFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. 
+# - can define CPU_LDFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_ASFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - may override any previously defined variable +# + +# ARCH = +# CROSS = +# MACHINE_CFLAGS = +# MACHINE_LDFLAGS = +# MACHINE_ASFLAGS = +# CPU_CFLAGS = +# CPU_LDFLAGS = +# CPU_ASFLAGS = + +MACHINE_CFLAGS = -march=corei7 +CPUFLAGS = SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 diff --git a/mk/machine/snb/rte.vars.mk b/mk/machine/snb/rte.vars.mk new file mode 100644 index 0000000000..63f3e6be66 --- /dev/null +++ b/mk/machine/snb/rte.vars.mk @@ -0,0 +1,61 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# machine: +# +# - can define ARCH variable (overriden by cmdline value) +# - can define CROSS variable (overriden by cmdline value) +# - define MACHINE_CFLAGS variable (overriden by cmdline value) +# - define MACHINE_LDFLAGS variable (overriden by cmdline value) +# - define MACHINE_ASFLAGS variable (overriden by cmdline value) +# - can define CPU_CFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_LDFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_ASFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - may override any previously defined variable +# + +# ARCH = +# CROSS = +# MACHINE_CFLAGS = +# MACHINE_LDFLAGS = +# MACHINE_ASFLAGS = +# CPU_CFLAGS = +# CPU_LDFLAGS = +# CPU_ASFLAGS = + +MACHINE_CFLAGS = -march=corei7-avx +CPUFLAGS = SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 AES PCLMULQDQ AVX diff --git a/mk/machine/wsm/rte.vars.mk b/mk/machine/wsm/rte.vars.mk new file mode 100644 index 0000000000..98176c42d7 --- /dev/null +++ b/mk/machine/wsm/rte.vars.mk @@ -0,0 +1,61 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. 
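Unlike the fixed atm/nhm/snb/wsm tables, the native machine shown earlier derives CPUFLAGS at build time: it reads the first "flags" line of /proc/cpuinfo once with $(shell) and appends a token for every feature word that $(filter) finds there. A compact sketch of the same detection, Linux-only because it depends on /proc/cpuinfo, with an abbreviated feature table:

# cpuflags.mk -- detect CPU features the way machine/native does
AUTO_CPUFLAGS := $(shell grep -m1 '^flags' /proc/cpuinfo)

# $(1) = word to look for in cpuinfo, $(2) = token to append
feature = $(if $(filter $(1),$(AUTO_CPUFLAGS)),$(2))

CPUFLAGS := $(strip \
	$(call feature,sse,SSE) \
	$(call feature,sse2,SSE2) \
	$(call feature,ssse3,SSSE3) \
	$(call feature,sse4_1,SSE4_1) \
	$(call feature,aes,AES) \
	$(call feature,avx,AVX))

show:
	@echo "CPUFLAGS = $(CPUFLAGS)"
.PHONY: show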
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# machine: +# +# - can define ARCH variable (overriden by cmdline value) +# - can define CROSS variable (overriden by cmdline value) +# - define MACHINE_CFLAGS variable (overriden by cmdline value) +# - define MACHINE_LDFLAGS variable (overriden by cmdline value) +# - define MACHINE_ASFLAGS variable (overriden by cmdline value) +# - can define CPU_CFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_LDFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_ASFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - may override any previously defined variable +# + +# ARCH = +# CROSS = +# MACHINE_CFLAGS = +# MACHINE_LDFLAGS = +# MACHINE_ASFLAGS = +# CPU_CFLAGS = +# CPU_LDFLAGS = +# CPU_ASFLAGS = + +MACHINE_CFLAGS = -march=corei7 -maes -mpclmul +CPUFLAGS = SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 AES PCLMULQDQ diff --git a/mk/rte.app.mk b/mk/rte.app.mk new file mode 100644 index 0000000000..019e7c35fa --- /dev/null +++ b/mk/rte.app.mk @@ -0,0 +1,236 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/internal/rte.compile-pre.mk +include $(RTE_SDK)/mk/internal/rte.install-pre.mk +include $(RTE_SDK)/mk/internal/rte.clean-pre.mk +include $(RTE_SDK)/mk/internal/rte.build-pre.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk + +# VPATH contains at least SRCDIR +VPATH += $(SRCDIR) + +_BUILD = $(APP) +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) +_INSTALL += $(RTE_OUTPUT)/app/$(APP) $(RTE_OUTPUT)/app/$(APP).map +POSTINSTALL += target-appinstall +_CLEAN = doclean +POSTCLEAN += target-appclean + +ifeq ($(NO_LDSCRIPT),) +LDSCRIPT = $(RTE_LDSCRIPT) +endif + +# default path for libs +LDLIBS += -L$(RTE_SDK_BIN)/lib + +# +# Include libraries depending on config if NO_AUTOLIBS is not set +# Order is important: from higher level to lower level +# +ifeq ($(NO_AUTOLIBS),) + +ifeq ($(CONFIG_RTE_LIBRTE_IGB_PMD),y) +LDLIBS += -lrte_pmd_igb +endif + +ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y) +LDLIBS += -lrte_pmd_ixgbe +endif + +ifeq ($(CONFIG_RTE_LIBRTE_MBUF),y) +LDLIBS += -lrte_mbuf +endif + +ifeq ($(CONFIG_RTE_LIBRTE_CMDLINE),y) +LDLIBS += -lrte_cmdline +endif + +ifeq ($(CONFIG_RTE_LIBRTE_TIMER),y) +LDLIBS += -lrte_timer +endif + +ifeq ($(CONFIG_RTE_LIBRTE_HASH),y) +LDLIBS += -lrte_hash +endif + +ifeq ($(CONFIG_RTE_LIBRTE_LPM),y) +LDLIBS += -lrte_lpm +endif + +LDLIBS += --start-group + +ifeq ($(CONFIG_RTE_LIBRTE_ETHER),y) +LDLIBS += -lethdev +endif + +ifeq ($(CONFIG_RTE_LIBRTE_MALLOC),y) +LDLIBS += -lrte_malloc +endif + +ifeq ($(CONFIG_RTE_LIBRTE_MEMPOOL),y) +LDLIBS += -lrte_mempool +endif + +ifeq ($(CONFIG_RTE_LIBRTE_RING),y) +LDLIBS += -lrte_ring +endif + +ifeq ($(CONFIG_RTE_LIBC),y) +LDLIBS += -lc +endif + +ifeq ($(CONFIG_RTE_LIBGLOSS),y) +LDLIBS += -lgloss +endif + +ifeq ($(CONFIG_RTE_LIBRTE_EAL),y) +LDLIBS += -lrte_eal +endif + +LDLIBS += $(EXECENV_LDLIBS) + +LDLIBS += --end-group + +endif # ifeq ($(NO_AUTOLIBS),) + +LDLIBS += $(CPU_LDLIBS) + +.PHONY: all +all: install + +.PHONY: install +install: build _postinstall + +_postinstall: build + +.PHONY: build +build: _postbuild + +exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1)))) + +ifeq ($(LINK_USING_CC),1) +comma := , +LDLIBS := $(addprefix -Wl$(comma),$(LDLIBS)) +LDFLAGS := $(addprefix -Wl$(comma),$(LDFLAGS)) +EXTRA_LDFLAGS := $(addprefix -Wl$(comma),$(EXTRA_LDFLAGS)) +O_TO_EXE = $(CC) $(CFLAGS) $(LDFLAGS_$(@)) \ + -Wl,-Map=$(@).map,--cref -o $@ $(OBJS-y) $(LDFLAGS) $(EXTRA_LDFLAGS) $(LDLIBS) +else +O_TO_EXE = $(LD) $(LDFLAGS) $(LDFLAGS_$(@)) $(EXTRA_LDFLAGS) \ + -Map=$(@).map --cref -o $@ $(OBJS-y) $(LDLIBS) +endif +O_TO_EXE_STR = $(subst ','\'',$(O_TO_EXE)) #'# fix syntax highlight +O_TO_EXE_DISP = $(if $(V),"$(O_TO_EXE_STR)"," LD $(@)") +O_TO_EXE_CMD = "cmd_$@ = $(O_TO_EXE_STR)" +O_TO_EXE_DO = @set -e; 
\ + echo $(O_TO_EXE_DISP); \ + $(O_TO_EXE) && \ + echo $(O_TO_EXE_CMD) > $(call exe2cmd,$(@)) + +-include .$(APP).cmd + +# path where libraries are retrieved +LDLIBS_PATH := $(subst -Wl$(comma)-L,,$(filter -Wl$(comma)-L%,$(LDLIBS))) +LDLIBS_PATH += $(subst -L,,$(filter -L%,$(LDLIBS))) + +# list of .a files that are linked to this application +LDLIBS_NAMES := $(patsubst -l%,lib%.a,$(filter -l%,$(LDLIBS))) +LDLIBS_NAMES += $(patsubst -Wl$(comma)-l%,lib%.a,$(filter -Wl$(comma)-l%,$(LDLIBS))) + +# list of found libraries files (useful for deps). If not found, the +# library is silently ignored and dep won't be checked +LDLIBS_FILES := $(wildcard $(foreach dir,$(LDLIBS_PATH),\ + $(addprefix $(dir)/,$(LDLIBS_NAMES)))) + +# +# Compile executable file if needed +# +$(APP): $(OBJS-y) $(LDLIBS_FILES) $(DEP_$(APP)) $(LDSCRIPT) FORCE + @[ -d $(dir $@) ] || mkdir -p $(dir $@) + $(if $(D),\ + @echo -n "$< -> $@ " ; \ + echo -n "file_missing=$(call boolean,$(file_missing)) " ; \ + echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_EXE_STR))) " ; \ + echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \ + echo "depfile_newer=$(call boolean,$(depfile_newer)) ") + $(if $(or \ + $(file_missing),\ + $(call cmdline_changed,$(O_TO_EXE_STR)),\ + $(depfile_missing),\ + $(depfile_newer)),\ + $(O_TO_EXE_DO)) + +# +# install app in $(RTE_OUTPUT)/app +# +$(RTE_OUTPUT)/app/$(APP): $(APP) + @echo " INSTALL-APP $(APP)" + @[ -d $(RTE_OUTPUT)/app ] || mkdir -p $(RTE_OUTPUT)/app + $(Q)cp -f $(APP) $(RTE_OUTPUT)/app + +# +# install app map file in $(RTE_OUTPUT)/app +# +$(RTE_OUTPUT)/app/$(APP).map: $(APP) + @echo " INSTALL-MAP $(APP).map" + @[ -d $(RTE_OUTPUT)/app ] || mkdir -p $(RTE_OUTPUT)/app + $(Q)cp -f $(APP).map $(RTE_OUTPUT)/app + +# +# Clean all generated files +# +.PHONY: clean +clean: _postclean + $(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) + +.PHONY: doclean +doclean: + $(Q)rm -rf $(APP) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \ + $(CMDS-all) $(INSTALL-FILES-all) .$(APP).cmd + + +include $(RTE_SDK)/mk/internal/rte.compile-post.mk +include $(RTE_SDK)/mk/internal/rte.install-post.mk +include $(RTE_SDK)/mk/internal/rte.clean-post.mk +include $(RTE_SDK)/mk/internal/rte.build-post.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk + +ifneq ($(wildcard $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.app.mk),) +include $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.app.mk +else +include $(RTE_SDK)/mk/target/generic/rte.app.mk +endif + +.PHONY: FORCE +FORCE: diff --git a/mk/rte.doc.mk b/mk/rte.doc.mk new file mode 100644 index 0000000000..b57504a91f --- /dev/null +++ b/mk/rte.doc.mk @@ -0,0 +1,127 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +DEFAULT_DPI ?= 300 + +ifeq ($(BASEDOCDIR),) +$(error "must be called from RTE root Makefile") +endif +ifeq ($(DOCDIR),) +$(error "must be called from RTE root Makefile") +endif + +VPATH = $(abspath $(BASEDOCDIR)/$(DOCDIR)) + +pngfiles = $(patsubst %.svg,%.png,$(SVG)) +pdfimgfiles = $(patsubst %.svg,%.pdf,$(SVG)) +htmlfiles = $(patsubst %.rst,%.html,$(RST)) +pdffiles = $(patsubst %.rst,%.pdf,$(RST)) + +.PHONY: all doc clean + +compare = $(strip $(subst $(1),,$(2)) $(subst $(2),,$(1))) +dirname = $(patsubst %/,%,$(dir $1)) + +# windows only: this is needed for native programs that do not handle +# unix-like paths on win32 +ifdef COMSPEC +winpath = "$(shell cygpath --windows $(abspath $(1)))" +else +winpath = $(1) +endif + +all doc: $(pngfiles) $(htmlfiles) $(pdffiles) $(DIRS) + @true + +htmldoc: $(pngfiles) $(htmlfiles) $(DIRS) + @true + +pdfdoc: $(pngfiles) $(pdffiles) $(DIRS) + @true + +doxydoc: $(pdfimgfiles) $(DIRS) + @true + +.PHONY: $(DIRS) +$(DIRS): + @[ -d $(CURDIR)/$@ ] || mkdir -p $(CURDIR)/$@ + $(Q)$(MAKE) DOCDIR=$(DOCDIR)/$@ BASEDOCDIR=$(BASEDOCDIR)/.. \ + -f $(RTE_SDK)/doc/$(DOCDIR)/$@/Makefile -C $(CURDIR)/$@ $(MAKECMDGOALS) + +%.png: %.svg + @echo " INKSCAPE $(@)" + $(Q)inkscape -d $(DEFAULT_DPI) -D -b ffffff -y 1.0 -e $(call winpath,$(@)) $(call winpath,$(<)) + +%.pdf: %.svg + @echo " INKSCAPE $(@)" + $(Q)inkscape -d $(DEFAULT_DPI) -D -b ffffff -y 1.0 -A $(call winpath,$(@)) $(call winpath,$(<)) + +.SECONDEXPANSION: +$(foreach f,$(RST),$(eval DEP_$(f:%.rst=%.html) = $(DEP_$(f)))) +%.html: %.rst $$(DEP_$$@) + @echo " RST2HTML $(@)" + $(Q)mkdir -p `dirname $(@)` ; \ + python $(BASEDOCDIR)/gen/gen-common.py html $(BASEDOCDIR) > $(BASEDOCDIR)/gen/rte.rst ; \ + python $(BASEDOCDIR)/html/rst2html-highlight.py --link-stylesheet \ + --stylesheet-path=$(BASEDOCDIR)/html/rte.css \ + --strip-comments< $(<) > $(@) ; \ + +# there is a bug in rst2pdf (issue 311): replacement of DSTDIR is not +# what we expect: we should not have to add doc/ +ifdef COMSPEC +WORKAROUND_PATH=$(BASEDOCDIR) +else +WORKAROUND_PATH=$(BASEDOCDIR)/doc +endif + +.SECONDEXPANSION: +$(foreach f,$(RST),$(eval DEP_$(f:%.rst=%.pdf) = $(DEP_$(f)))) +%.pdf: %.rst $$(DEP_$$@) + @echo " RST2PDF $(@)" + $(Q)mkdir -p `dirname $(@)` ; \ + python $(BASEDOCDIR)/gen/gen-common.py pdf $(BASEDOCDIR) > $(BASEDOCDIR)/gen/rte.rst ; \ + rst2pdf -s $(BASEDOCDIR)/pdf/rte-stylesheet.json \ + --default-dpi=300 < $(<) > $(@) + +CLEANDIRS = $(addsuffix _clean,$(DIRS)) + +docclean clean: $(CLEANDIRS) + @rm -f $(htmlfiles) $(pdffiles) $(pngfiles) $(pdfimgfiles) $(BASEDOCDIR)/gen/rte.rst + +%_clean: + @if [ -f $(RTE_SDK)/doc/$(DOCDIR)/$*/Makefile -a -d $(CURDIR)/$* ]; then \ + $(MAKE) DOCDIR=$(DOCDIR)/$* BASEDOCDIR=$(BASEDOCDIR)/.. 
\ + -f $(RTE_SDK)/doc/$(DOCDIR)/$*/Makefile -C $(CURDIR)/$* clean ; \ + fi + +.NOTPARALLEL: diff --git a/mk/rte.extapp.mk b/mk/rte.extapp.mk new file mode 100644 index 0000000000..f6eb6d7639 --- /dev/null +++ b/mk/rte.extapp.mk @@ -0,0 +1,56 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +MAKEFLAGS += --no-print-directory + +# we must create the output dir first and recall the same Makefile +# from this directory +ifeq ($(NOT_FIRST_CALL),) + +NOT_FIRST_CALL = 1 +export NOT_FIRST_CALL + +all: + $(Q)mkdir -p $(RTE_OUTPUT) + $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) \ + S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR) + +%:: + $(Q)mkdir -p $(RTE_OUTPUT) + $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) $@ \ + S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR) +else +include $(RTE_SDK)/mk/rte.app.mk +endif + +include $(RTE_SDK)/mk/internal/rte.exthelp-post.mk diff --git a/mk/rte.extlib.mk b/mk/rte.extlib.mk new file mode 100644 index 0000000000..af72d354e3 --- /dev/null +++ b/mk/rte.extlib.mk @@ -0,0 +1,56 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +MAKEFLAGS += --no-print-directory + +# we must create the output dir first and recall the same Makefile +# from this directory +ifeq ($(NOT_FIRST_CALL),) + +NOT_FIRST_CALL = 1 +export NOT_FIRST_CALL + +all: + $(Q)mkdir -p $(RTE_OUTPUT) + $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) \ + S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR) + +%:: + $(Q)mkdir -p $(RTE_OUTPUT) + $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) $@ \ + S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR) +else +include $(RTE_SDK)/mk/rte.lib.mk +endif + +include $(RTE_SDK)/mk/internal/rte.exthelp-post.mk diff --git a/mk/rte.extobj.mk b/mk/rte.extobj.mk new file mode 100644 index 0000000000..96f9dbbeff --- /dev/null +++ b/mk/rte.extobj.mk @@ -0,0 +1,56 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
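rte.extapp.mk, rte.extlib.mk and rte.extobj.mk share a two-pass scheme for building external code out of tree: on the first pass (NOT_FIRST_CALL unset) they only create RTE_OUTPUT and re-invoke the same makefile from inside it, forwarding any other goal through the match-anything "%::" rule, and on the second pass the exported NOT_FIRST_CALL flag routes straight to the real rte.app.mk / rte.lib.mk / rte.obj.mk logic. The recursion pattern on its own, with BUILD_DIR and the second-pass rules standing in for the real ones:

# twopass.mk -- re-run make inside BUILD_DIR, then do the real work there
BUILD_DIR ?= build

ifeq ($(NOT_FIRST_CALL),)   # ---- first pass: invoked from the source dir

NOT_FIRST_CALL = 1
export NOT_FIRST_CALL

all:
	@mkdir -p $(BUILD_DIR)
	$(MAKE) -C $(BUILD_DIR) -f $(abspath $(lastword $(MAKEFILE_LIST))) all

# any other goal is forwarded unchanged to the sub-make
%::
	@mkdir -p $(BUILD_DIR)
	$(MAKE) -C $(BUILD_DIR) -f $(abspath $(lastword $(MAKEFILE_LIST))) $@

else                        # ---- second pass: running inside BUILD_DIR

all:
	@echo "building in $(CURDIR)"

clean:
	@echo "cleaning in $(CURDIR)"

endif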
+# +# version: DPDK.L.1.2.3-3 + +MAKEFLAGS += --no-print-directory + +# we must create the output dir first and recall the same Makefile +# from this directory +ifeq ($(NOT_FIRST_CALL),) + +NOT_FIRST_CALL = 1 +export NOT_FIRST_CALL + +all: + $(Q)mkdir -p $(RTE_OUTPUT) + $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) \ + S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR) + +%:: + $(Q)mkdir -p $(RTE_OUTPUT) + $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) $@ \ + S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR) +else +include $(RTE_SDK)/mk/rte.obj.mk +endif + +include $(RTE_SDK)/mk/internal/rte.exthelp-post.mk diff --git a/mk/rte.extvars.mk b/mk/rte.extvars.mk new file mode 100644 index 0000000000..b7e5c2c1eb --- /dev/null +++ b/mk/rte.extvars.mk @@ -0,0 +1,83 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +# +# directory where sources are located +# +ifdef S +ifeq ("$(origin S)", "command line") +RTE_SRCDIR := $(abspath $(S)) +endif +endif +RTE_SRCDIR ?= $(CURDIR) +export RTE_SRCDIR + +# +# Makefile to call once $(RTE_OUTPUT) is created +# +ifdef M +ifeq ("$(origin M)", "command line") +RTE_EXTMK := $(abspath $(M)) +endif +endif +RTE_EXTMK ?= $(RTE_SRCDIR)/Makefile +export RTE_EXTMK + +RTE_SDK_BIN := $(RTE_SDK)/$(RTE_TARGET) + +# +# Output files wil go in a separate directory: default output is +# $(RTE_SRCDIR)/build +# Output dir can be given as command line using "O=" +# +ifdef O +ifeq ("$(origin O)", "command line") +RTE_OUTPUT := $(abspath $(O)) +endif +endif +RTE_OUTPUT ?= $(RTE_SRCDIR)/build +export RTE_OUTPUT + +# if we are building an external application, include SDK +# configuration and include project configuration if any +include $(RTE_SDK_BIN)/.config +ifneq ($(wildcard $(RTE_OUTPUT)/.config),) + include $(RTE_OUTPUT)/.config +endif +# remove double-quotes from config names +RTE_ARCH := $(CONFIG_RTE_ARCH:"%"=%) +RTE_MACHINE := $(CONFIG_RTE_MACHINE:"%"=%) +RTE_EXEC_ENV := $(CONFIG_RTE_EXEC_ENV:"%"=%) +RTE_TOOLCHAIN := $(CONFIG_RTE_TOOLCHAIN:"%"=%) + + diff --git a/mk/rte.gnuconfigure.mk b/mk/rte.gnuconfigure.mk new file mode 100644 index 0000000000..f031be38d8 --- /dev/null +++ b/mk/rte.gnuconfigure.mk @@ -0,0 +1,76 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
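The rte.extvars.mk fragment above resolves S= (source directory), M= (external Makefile) and O= (output directory) from the make command line and then pulls in the SDK's .config. A minimal sketch of an out-of-tree invocation that exercises those variables; the SDK path, target name and directories are hypothetical:

    cd /path/to/my_lib
    make RTE_SDK=/opt/dpdk RTE_TARGET=x86_64-default-linuxapp-gcc O=/tmp/my_lib_build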
+# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/internal/rte.build-pre.mk +include $(RTE_SDK)/mk/internal/rte.install-pre.mk +include $(RTE_SDK)/mk/internal/rte.clean-pre.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk + +# VPATH contains at least SRCDIR +VPATH += $(SRCDIR) +_BUILD = configure +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) +_CLEAN = doclean + +.PHONY: all +all: install + +.PHONY: install +install: build _postinstall + +_postinstall: build + +.PHONY: build +build: _postbuild + +configure: + $(Q)cd $(CONFIGURE_PATH) ; \ + ./configure --prefix $(CONFIGURE_PREFIX) $(CONFIGURE_ARGS) ; \ + make ; \ + make install + +.PHONY: clean +clean: _postclean + +.PHONY: doclean +doclean: + $(Q)cd $(CONFIGURE_PATH) ; make clean + $(Q)rm -f $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) + +include $(RTE_SDK)/mk/internal/rte.build-post.mk +include $(RTE_SDK)/mk/internal/rte.install-post.mk +include $(RTE_SDK)/mk/internal/rte.clean-post.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk + +.PHONY: FORCE +FORCE: diff --git a/mk/rte.hostapp.mk b/mk/rte.hostapp.mk new file mode 100644 index 0000000000..15e1478133 --- /dev/null +++ b/mk/rte.hostapp.mk @@ -0,0 +1,125 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
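The rte.gnuconfigure.mk rules above wrap an autotools-style package: the configure target runs ./configure with CONFIGURE_PREFIX and CONFIGURE_ARGS inside CONFIGURE_PATH, then make and make install. A hedged sketch of a wrapper Makefile; the package name and paths are hypothetical:

    include $(RTE_SDK)/mk/rte.vars.mk
    CONFIGURE_PATH   = $(SRCDIR)/libfoo          # hypothetical autotools package
    CONFIGURE_PREFIX = $(RTE_OUTPUT)/libfoo
    CONFIGURE_ARGS   = --disable-shared
    include $(RTE_SDK)/mk/rte.gnuconfigure.mk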
+# +# version: DPDK.L.1.2.3-3 + +# tell rte.compile-pre.mk to use HOSTCC instead of CC +USE_HOST := 1 +include $(RTE_SDK)/mk/internal/rte.compile-pre.mk +include $(RTE_SDK)/mk/internal/rte.install-pre.mk +include $(RTE_SDK)/mk/internal/rte.clean-pre.mk +include $(RTE_SDK)/mk/internal/rte.build-pre.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk + +# VPATH contains at least SRCDIR +VPATH += $(SRCDIR) + +_BUILD = $(HOSTAPP) +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) $(RTE_OUTPUT)/hostapp/$(HOSTAPP) +_CLEAN = doclean + +.PHONY: all +all: install + +.PHONY: install +install: build _postinstall + +_postinstall: build + +.PHONY: build +build: _postbuild + +exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1)))) + +O_TO_EXE = $(HOSTCC) $(HOST_LDFLAGS) $(LDFLAGS_$(@)) \ + $(EXTRA_HOST_LDFLAGS) -o $@ $(OBJS-y) $(LDLIBS) +O_TO_EXE_STR = $(subst ','\'',$(O_TO_EXE)) #'# fix syntax highlight +O_TO_EXE_DISP = $(if $(V),"$(O_TO_EXE_STR)"," HOSTLD $(@)") +O_TO_EXE_CMD = "cmd_$@ = $(O_TO_EXE_STR)" +O_TO_EXE_DO = @set -e; \ + echo $(O_TO_EXE_DISP); \ + $(O_TO_EXE) && \ + echo $(O_TO_EXE_CMD) > $(call exe2cmd,$(@)) + +-include .$(HOSTAPP).cmd + +# list of .a files that are linked to this application +LDLIBS_FILES := $(wildcard \ + $(addprefix $(RTE_OUTPUT)/lib/, \ + $(patsubst -l%,lib%.a,$(filter -l%,$(LDLIBS))))) + +# +# Compile executable file if needed +# +$(HOSTAPP): $(OBJS-y) $(LDLIBS_FILES) FORCE + @[ -d $(dir $@) ] || mkdir -p $(dir $@) + $(if $(D),\ + @echo -n "$@ -> $< " ; \ + echo -n "file_missing=$(call boolean,$(file_missing)) " ; \ + echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_EXE_STR))) " ; \ + echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \ + echo "depfile_newer=$(call boolean,$(depfile_newer)) ") + $(if $(or \ + $(file_missing),\ + $(call cmdline_changed,$(O_TO_EXE_STR)),\ + $(depfile_missing),\ + $(depfile_newer)),\ + $(O_TO_EXE_DO)) + +# +# install app in $(RTE_OUTPUT)/hostapp +# +$(RTE_OUTPUT)/hostapp/$(HOSTAPP): $(HOSTAPP) + @echo " INSTALL-HOSTAPP $(HOSTAPP)" + @[ -d $(RTE_OUTPUT)/hostapp ] || mkdir -p $(RTE_OUTPUT)/hostapp + $(Q)cp -f $(HOSTAPP) $(RTE_OUTPUT)/hostapp + +# +# Clean all generated files +# +.PHONY: clean +clean: _postclean + $(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) + +.PHONY: doclean +doclean: + $(Q)rm -rf $(HOSTAPP) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \ + $(CMDS-all) $(INSTALL-FILES-all) .$(HOSTAPP).cmd + + +include $(RTE_SDK)/mk/internal/rte.compile-post.mk +include $(RTE_SDK)/mk/internal/rte.install-post.mk +include $(RTE_SDK)/mk/internal/rte.clean-post.mk +include $(RTE_SDK)/mk/internal/rte.build-post.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk + +.PHONY: FORCE +FORCE: diff --git a/mk/rte.hostlib.mk b/mk/rte.hostlib.mk new file mode 100644 index 0000000000..fcaade1fe7 --- /dev/null +++ b/mk/rte.hostlib.mk @@ -0,0 +1,118 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
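rte.hostapp.mk above compiles with HOSTCC and installs the resulting binary under $(RTE_OUTPUT)/hostapp, which suits build-time helper tools. A minimal sketch of a consumer Makefile; the tool and source names are hypothetical:

    include $(RTE_SDK)/mk/rte.vars.mk
    HOSTAPP = my-gen-tool            # hypothetical host-side helper
    SRCS-y := my_gen_tool.c
    include $(RTE_SDK)/mk/rte.hostapp.mk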
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# tell rte.compile-pre.mk to use HOSTCC instead of CC +USE_HOST := 1 +include $(RTE_SDK)/mk/internal/rte.compile-pre.mk +include $(RTE_SDK)/mk/internal/rte.install-pre.mk +include $(RTE_SDK)/mk/internal/rte.clean-pre.mk +include $(RTE_SDK)/mk/internal/rte.build-pre.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk + +# VPATH contains at least SRCDIR +VPATH += $(SRCDIR) + +_BUILD = $(HOSTLIB) +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) $(RTE_OUTPUT)/hostlib/$(HOSTLIB) +_CLEAN = doclean + +.PHONY: all +all: install + +.PHONY: install +install: build _postinstall + +_postinstall: build + +.PHONY: build +build: _postbuild + +exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1)))) + +O_TO_A = $(AR) crus $(HOSTLIB) $(OBJS-y) +O_TO_A_STR = $(subst ','\'',$(O_TO_A)) #'# fix syntax highlight +O_TO_A_DISP = $(if $(V),"$(O_TO_A_STR)"," HOSTAR $(@)") +O_TO_A_CMD = "cmd_$@ = $(O_TO_A_STR)" +O_TO_A_DO = @set -e; \ + echo $(O_TO_A_DISP); \ + $(O_TO_A) && \ + echo $(O_TO_A_CMD) > $(call exe2cmd,$(@)) + +-include .$(HOSTLIB).cmd + +# +# Archive objects in .a file if needed +# +$(HOSTLIB): $(OBJS-y) FORCE + @[ -d $(dir $@) ] || mkdir -p $(dir $@) + $(if $(D),\ + @echo -n "$@ -> $< " ; \ + echo -n "file_missing=$(call boolean,$(file_missing)) " ; \ + echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_A_STR))) " ; \ + echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \ + echo "depfile_newer=$(call boolean,$(depfile_newer)) ") + $(if $(or \ + $(file_missing),\ + $(call cmdline_changed,$(O_TO_A_STR)),\ + $(depfile_missing),\ + $(depfile_newer)),\ + $(O_TO_A_DO)) + +# +# install lib in $(RTE_OUTPUT)/hostlib +# +$(RTE_OUTPUT)/hostlib/$(HOSTLIB): $(HOSTLIB) + @echo " INSTALL-HOSTLIB $(HOSTLIB)" + @[ -d $(RTE_OUTPUT)/hostlib ] || mkdir -p $(RTE_OUTPUT)/hostlib + $(Q)cp -f $(HOSTLIB) $(RTE_OUTPUT)/hostlib + +# +# Clean all generated files +# +.PHONY: clean +clean: _postclean + +.PHONY: doclean +doclean: + $(Q)rm -rf $(HOSTLIB) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \ + $(CMDS-all) $(INSTALL-FILES-all) + $(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) + +include $(RTE_SDK)/mk/internal/rte.compile-post.mk +include $(RTE_SDK)/mk/internal/rte.install-post.mk +include $(RTE_SDK)/mk/internal/rte.clean-post.mk +include $(RTE_SDK)/mk/internal/rte.build-post.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk + +.PHONY: FORCE +FORCE: diff --git a/mk/rte.install.mk b/mk/rte.install.mk new file mode 100644 index 0000000000..9087aaf2f4 --- /dev/null +++ 
b/mk/rte.install.mk @@ -0,0 +1,60 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# install-only makefile (no build target) + +include $(RTE_SDK)/mk/internal/rte.install-pre.mk +include $(RTE_SDK)/mk/internal/rte.clean-pre.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk + +# VPATH contains at least SRCDIR +VPATH += $(SRCDIR) + +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) +_CLEAN = doclean + +.PHONY: all +all: _postinstall + @true + +.PHONY: clean +clean: _postclean + +.PHONY: doclean +doclean: + @rm -rf $(INSTALL-FILES-all) + @rm -f $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) + +include $(RTE_SDK)/mk/internal/rte.install-post.mk +include $(RTE_SDK)/mk/internal/rte.clean-post.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk diff --git a/mk/rte.lib.mk b/mk/rte.lib.mk new file mode 100644 index 0000000000..d3737fe763 --- /dev/null +++ b/mk/rte.lib.mk @@ -0,0 +1,116 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
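rte.install.mk above is the install-only variant: there is no build step, it only honours INSTALL-FILES-y and SYMLINK-FILES-y. A sketch of a consumer Makefile; the exact file-list conventions live in the internal rte.install-pre.mk, so the assignments below are placeholders rather than the canonical usage:

    include $(RTE_SDK)/mk/rte.vars.mk
    INSTALL-FILES-y += my_header.h   # placeholder file names
    SYMLINK-FILES-y += my_script.sh
    include $(RTE_SDK)/mk/rte.install.mk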
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/internal/rte.compile-pre.mk +include $(RTE_SDK)/mk/internal/rte.install-pre.mk +include $(RTE_SDK)/mk/internal/rte.clean-pre.mk +include $(RTE_SDK)/mk/internal/rte.build-pre.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk + +# VPATH contains at least SRCDIR +VPATH += $(SRCDIR) + +_BUILD = $(LIB) +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) $(RTE_OUTPUT)/lib/$(LIB) +_CLEAN = doclean + +.PHONY: all +all: install + +.PHONY: install +install: build _postinstall + +_postinstall: build + +.PHONY: build +build: _postbuild + +exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1)))) + +O_TO_A = $(AR) crus $(LIB) $(OBJS-y) +O_TO_A_STR = $(subst ','\'',$(O_TO_A)) #'# fix syntax highlight +O_TO_A_DISP = $(if $(V),"$(O_TO_A_STR)"," AR $(@)") +O_TO_A_CMD = "cmd_$@ = $(O_TO_A_STR)" +O_TO_A_DO = @set -e; \ + echo $(O_TO_A_DISP); \ + $(O_TO_A) && \ + echo $(O_TO_A_CMD) > $(call exe2cmd,$(@)) + +-include .$(LIB).cmd + +# +# Archive objects in .a file if needed +# +$(LIB): $(OBJS-y) $(DEP_$(LIB)) FORCE + @[ -d $(dir $@) ] || mkdir -p $(dir $@) + $(if $(D),\ + @echo -n "$< -> $@ " ; \ + echo -n "file_missing=$(call boolean,$(file_missing)) " ; \ + echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_A_STR))) " ; \ + echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \ + echo "depfile_newer=$(call boolean,$(depfile_newer)) ") + $(if $(or \ + $(file_missing),\ + $(call cmdline_changed,$(O_TO_A_STR)),\ + $(depfile_missing),\ + $(depfile_newer)),\ + $(O_TO_A_DO)) + +# +# install lib in $(RTE_OUTPUT)/lib +# +$(RTE_OUTPUT)/lib/$(LIB): $(LIB) + @echo " INSTALL-LIB $(LIB)" + @[ -d $(RTE_OUTPUT)/lib ] || mkdir -p $(RTE_OUTPUT)/lib + $(Q)cp -f $(LIB) $(RTE_OUTPUT)/lib + +# +# Clean all generated files +# +.PHONY: clean +clean: _postclean + +.PHONY: doclean +doclean: + $(Q)rm -rf $(LIB) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \ + $(CMDS-all) $(INSTALL-FILES-all) + $(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) + +include $(RTE_SDK)/mk/internal/rte.compile-post.mk +include $(RTE_SDK)/mk/internal/rte.install-post.mk +include $(RTE_SDK)/mk/internal/rte.clean-post.mk +include $(RTE_SDK)/mk/internal/rte.build-post.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk + +.PHONY: FORCE +FORCE: diff --git a/mk/rte.module.mk b/mk/rte.module.mk new file mode 100644 index 0000000000..3c95fae575 --- /dev/null +++ b/mk/rte.module.mk @@ -0,0 +1,117 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
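rte.lib.mk above archives OBJS-y into $(LIB) with ar and copies the result to $(RTE_OUTPUT)/lib. A minimal sketch of a library Makefile; the library and source names are hypothetical:

    include $(RTE_SDK)/mk/rte.vars.mk
    LIB = librte_myutil.a            # hypothetical library name
    SRCS-y := rte_myutil.c
    include $(RTE_SDK)/mk/rte.lib.mk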
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +##### if sourced from kernel Kbuild system +ifneq ($(KERNELRELEASE),) +override EXTRA_CFLAGS = $(MODULE_CFLAGS) $(EXTRA_KERNEL_CFLAGS) +obj-m += $(MODULE).o +ifneq ($(MODULE),$(notdir $(SRCS-y:%.c=%))) +$(MODULE)-objs += $(notdir $(SRCS-y:%.c=%.o)) +endif + +##### if launched from rte build system +else + +include $(RTE_SDK)/mk/internal/rte.install-pre.mk +include $(RTE_SDK)/mk/internal/rte.clean-pre.mk +include $(RTE_SDK)/mk/internal/rte.build-pre.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk + +# VPATH contains at least SRCDIR +VPATH += $(SRCDIR) + +_BUILD = $(MODULE).ko +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) \ + $(RTE_OUTPUT)/kmod/$(MODULE).ko +_CLEAN = doclean + +SRCS_LINKS = $(addsuffix _link,$(SRCS-y)) + +compare = $(strip $(subst $(1),,$(2)) $(subst $(2),,$(1))) + +.PHONY: all +all: install + +.PHONY: install +install: build _postinstall + +_postinstall: build + +.PHONY: build +build: _postbuild + +# Link all sources in build directory +%_link: FORCE + $(if $(call compare,$(notdir $*),$*),\ + @if [ ! -f $(notdir $(*)) ]; then ln -nfs $(*) . ; fi,\ + @if [ ! -f $(notdir $(*)) ]; then ln -nfs $(SRCDIR)/$(*) . ; fi) + +# build module +$(MODULE).ko: $(SRCS_LINKS) + @if [ ! -f $(notdir Makefile) ]; then ln -nfs $(SRCDIR)/Makefile . ; fi + @$(MAKE) -C $(RTE_KERNELDIR) M=$(CURDIR) O=$(RTE_KERNELDIR) + +# install module in $(RTE_OUTPUT)/kmod +$(RTE_OUTPUT)/kmod/$(MODULE).ko: $(MODULE).ko + @echo INSTALL-MODULE $(MODULE).ko + @[ -d $(RTE_OUTPUT)/kmod ] || mkdir -p $(RTE_OUTPUT)/kmod + @cp -f $(MODULE).ko $(RTE_OUTPUT)/kmod + +# install module +modules_install: + @$(MAKE) -C $(RTE_KERNELDIR) M=$(CURDIR) O=$(RTE_KERNELDIR) \ + modules_install + +.PHONY: clean +clean: _postclean + +# do a make clean and remove links +.PHONY: doclean +doclean: + @if [ ! -f $(notdir Makefile) ]; then ln -nfs $(SRCDIR)/Makefile . 
; fi + $(Q)$(MAKE) -C $(RTE_KERNELDIR) M=$(CURDIR) O=$(RTE_KERNELDIR) clean + @$(foreach FILE,$(SRCS-y) $(SRCS-n) $(SRCS-),\ + if [ -h $(notdir $(FILE)) ]; then rm -f $(notdir $(FILE)) ; fi ;) + @if [ -h $(notdir Makefile) ]; then rm -f $(notdir Makefile) ; fi + @rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) \ + $(INSTALL-FILES-all) + +include $(RTE_SDK)/mk/internal/rte.install-post.mk +include $(RTE_SDK)/mk/internal/rte.clean-post.mk +include $(RTE_SDK)/mk/internal/rte.build-post.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk + +.PHONY: FORCE +FORCE: + +endif diff --git a/mk/rte.obj.mk b/mk/rte.obj.mk new file mode 100644 index 0000000000..6005b39f73 --- /dev/null +++ b/mk/rte.obj.mk @@ -0,0 +1,114 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
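rte.module.mk above delegates to the kernel Kbuild system in $(RTE_KERNELDIR), passing MODULE_CFLAGS through as EXTRA_CFLAGS and installing the .ko under $(RTE_OUTPUT)/kmod. A minimal sketch of a module Makefile; the module and source names are hypothetical:

    include $(RTE_SDK)/mk/rte.vars.mk
    MODULE = rte_dummy               # hypothetical module name
    MODULE_CFLAGS += -I$(SRCDIR)     # forwarded to Kbuild as EXTRA_CFLAGS
    SRCS-y := rte_dummy_main.c
    include $(RTE_SDK)/mk/rte.module.mk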
+# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/internal/rte.compile-pre.mk +include $(RTE_SDK)/mk/internal/rte.install-pre.mk +include $(RTE_SDK)/mk/internal/rte.clean-pre.mk +include $(RTE_SDK)/mk/internal/rte.build-pre.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk + +# VPATH contains at least SRCDIR +VPATH += $(SRCDIR) + +ifneq ($(OBJ),) +_BUILD = $(OBJ) +else +_BUILD = $(OBJS-y) +endif +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) +_CLEAN = doclean + +.PHONY: all +all: install + +.PHONY: install +install: build _postinstall + +_postinstall: build + +.PHONY: build +build: _postbuild + +ifneq ($(OBJ),) +exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1)))) + +O_TO_O = $(LD) -r -o $(OBJ) $(OBJS-y) +O_TO_O_STR = $(subst ','\'',$(O_TO_O)) #'# fix syntax highlight +O_TO_O_DISP = $(if $(V),"$(O_TO_O_STR)"," LD $(@)") +O_TO_O_CMD = "cmd_$@ = $(O_TO_O_STR)" +O_TO_O_DO = @set -e; \ + echo $(O_TO_O_DISP); \ + $(O_TO_O) && \ + echo $(O_TO_O_CMD) > $(call exe2cmd,$(@)) + +-include .$(OBJ).cmd + +# +# Archive objects in .a file if needed +# +$(OBJ): $(OBJS-y) FORCE + @[ -d $(dir $@) ] || mkdir -p $(dir $@) + $(if $(D),\ + @echo -n "$< -> $@ " ; \ + echo -n "file_missing=$(call boolean,$(file_missing)) " ; \ + echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_O_STR))) " ; \ + echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \ + echo "depfile_newer=$(call boolean,$(depfile_newer)) ") + $(if $(or \ + $(file_missing),\ + $(call cmdline_changed,$(O_TO_O_STR)),\ + $(depfile_missing),\ + $(depfile_newer)),\ + $(O_TO_O_DO)) +endif + +# +# Clean all generated files +# +.PHONY: clean +clean: _postclean + +.PHONY: doclean +doclean: + @rm -rf $(OBJ) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \ + $(CMDS-all) $(INSTALL-FILES-all) + @rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) + +include $(RTE_SDK)/mk/internal/rte.compile-post.mk +include $(RTE_SDK)/mk/internal/rte.install-post.mk +include $(RTE_SDK)/mk/internal/rte.clean-post.mk +include $(RTE_SDK)/mk/internal/rte.build-post.mk +include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk + +.PHONY: FORCE +FORCE: diff --git a/mk/rte.sdkbuild.mk b/mk/rte.sdkbuild.mk new file mode 100644 index 0000000000..0a56063c45 --- /dev/null +++ b/mk/rte.sdkbuild.mk @@ -0,0 +1,102 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
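When OBJ is set, rte.obj.mk above relinks all OBJS-y into a single relocatable object with "$(LD) -r"; when it is left empty, only the individual objects are built. A sketch of a consumer Makefile; the object and source names are hypothetical:

    include $(RTE_SDK)/mk/rte.vars.mk
    OBJ = combined.o                 # hypothetical; enables the "ld -r" relink above
    SRCS-y := a.c b.c
    include $(RTE_SDK)/mk/rte.obj.mk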
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# include rte.vars.mk if config file exists +# +ifeq (,$(wildcard $(RTE_OUTPUT)/.config)) + $(error "need a make config first") +else + include $(RTE_SDK)/mk/rte.vars.mk +endif + +# +# include .depdirs and define rules to order priorities between build +# of directories. +# +-include $(RTE_OUTPUT)/.depdirs + +define depdirs_rule +$(1): $(sort $(LOCAL_DEPDIRS-$(1))) +endef + +$(foreach d,$(ROOTDIRS-y),$(eval $(call depdirs_rule,$(d)))) + +# +# build and clean targets +# + +CLEANDIRS = $(addsuffix _clean,$(ROOTDIRS-y) $(ROOTDIRS-n) $(ROOTDIRS-)) + +.PHONY: build +build: $(ROOTDIRS-y) + @echo Build complete + +.PHONY: clean +clean: $(CLEANDIRS) + @rm -rf $(RTE_OUTPUT)/include $(RTE_OUTPUT)/app \ + $(RTE_OUTPUT)/hostapp $(RTE_OUTPUT)/lib \ + $(RTE_OUTPUT)/hostlib $(RTE_OUTPUT)/kmod + @[ -d $(RTE_OUTPUT)/include ] || mkdir -p $(RTE_OUTPUT)/include + @$(RTE_SDK)/scripts/gen-config-h.sh $(RTE_OUTPUT)/.config \ + > $(RTE_OUTPUT)/include/rte_config.h + $(Q)$(MAKE) -f $(RTE_SDK)/Makefile gcovclean + @echo Clean complete + +.SECONDEXPANSION: +.PHONY: $(ROOTDIRS-y) +$(ROOTDIRS-y): + @[ -d $(BUILDDIR)/$@ ] || mkdir -p $(BUILDDIR)/$@ + @echo "== Build $@" + $(Q)$(MAKE) S=$@ -f $(RTE_SRCDIR)/$@/Makefile -C $(BUILDDIR)/$@ all + +%_clean: + @echo "== Clean $*" + $(Q)if [ -f $(RTE_SRCDIR)/$*/Makefile -a -d $(BUILDDIR)/$* ]; then \ + $(MAKE) S=$* -f $(RTE_SRCDIR)/$*/Makefile -C $(BUILDDIR)/$* clean ; \ + fi + +RTE_MAKE_SUBTARGET ?= all + +%_sub: $(addsuffix _sub,$(FULL_DEPDIRS-$(*))) + @echo $(addsuffix _sub,$(FULL_DEPDIRS-$(*))) + @[ -d $(BUILDDIR)/$* ] || mkdir -p $(BUILDDIR)/$* + @echo "== Build $*" + $(Q)$(MAKE) S=$* -f $(RTE_SRCDIR)/$*/Makefile -C $(BUILDDIR)/$* \ + $(RTE_MAKE_SUBTARGET) + +.PHONY: all +all: build + +.PHONY: FORCE +FORCE: diff --git a/mk/rte.sdkconfig.mk b/mk/rte.sdkconfig.mk new file mode 100644 index 0000000000..ed81c47471 --- /dev/null +++ b/mk/rte.sdkconfig.mk @@ -0,0 +1,109 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +INSTALL_CONFIGS := $(filter-out %~,\ + $(patsubst $(RTE_SRCDIR)/config/defconfig_%,%,\ + $(wildcard $(RTE_SRCDIR)/config/defconfig_*))) +INSTALL_TARGETS := $(addsuffix _install,$(INSTALL_CONFIGS)) + +.PHONY: config +ifeq ($(RTE_CONFIG_TEMPLATE),) +config: + @echo -n "No template specified. Use T=template " ; \ + echo "among the following list:" ; \ + for t in $(INSTALL_CONFIGS); do \ + echo " $$t" ; \ + done +else +config: $(RTE_OUTPUT)/include/rte_config.h $(RTE_OUTPUT)/Makefile + $(Q)$(MAKE) depdirs + @echo "Configuration done" +endif + +ifdef NODOTCONF +$(RTE_OUTPUT)/.config: ; +else +$(RTE_OUTPUT)/.config: $(RTE_CONFIG_TEMPLATE) FORCE + @[ -d $(RTE_OUTPUT) ] || mkdir -p $(RTE_OUTPUT) + $(Q)if [ "$(RTE_CONFIG_TEMPLATE)" != "" -a -f "$(RTE_CONFIG_TEMPLATE)" ]; then \ + if ! cmp -s $(RTE_CONFIG_TEMPLATE) $(RTE_OUTPUT)/.config; then \ + cp $(RTE_CONFIG_TEMPLATE) $(RTE_OUTPUT)/.config ; \ + fi ; \ + else \ + echo -n "No template specified. Use T=template " ; \ + echo "among the following list:" ; \ + for t in $(INSTALL_CONFIGS); do \ + echo " $$t" ; \ + done ; \ + fi +endif + +# generate a Makefile for this build directory +# use a relative path so it will continue to work even if we move the directory +SDK_RELPATH=$(shell $(RTE_SDK)/scripts/relpath.sh $(abspath $(RTE_SRCDIR)) \ + $(abspath $(RTE_OUTPUT))) +OUTPUT_RELPATH=$(shell $(RTE_SDK)/scripts/relpath.sh $(abspath $(RTE_OUTPUT)) \ + $(abspath $(RTE_SRCDIR))) +$(RTE_OUTPUT)/Makefile: + @[ -d $(RTE_OUTPUT) ] || mkdir -p $(RTE_OUTPUT) + $(Q)$(RTE_SDK)/scripts/gen-build-mk.sh $(SDK_RELPATH) $(OUTPUT_RELPATH) \ + > $(RTE_OUTPUT)/Makefile + +# clean installed files, and generate a new config header file +# if NODOTCONF variable is defined, don't try to rebuild .config +$(RTE_OUTPUT)/include/rte_config.h: $(RTE_OUTPUT)/.config + $(Q)rm -rf $(RTE_OUTPUT)/include $(RTE_OUTPUT)/app \ + $(RTE_OUTPUT)/hostapp $(RTE_OUTPUT)/lib \ + $(RTE_OUTPUT)/hostlib + @[ -d $(RTE_OUTPUT)/include ] || mkdir -p $(RTE_OUTPUT)/include + $(Q)$(RTE_SDK)/scripts/gen-config-h.sh $(RTE_OUTPUT)/.config \ + > $(RTE_OUTPUT)/include/rte_config.h + +# generate the rte_config.h +.PHONY: headerconfig +headerconfig: $(RTE_OUTPUT)/include/rte_config.h + @true + +# check that .config is present, and if yes, check that rte_config.h +# is up to date +.PHONY: checkconfig +checkconfig: + @if [ ! 
-f $(RTE_OUTPUT)/.config ]; then \ + echo "No .config in build directory"; \ + exit 1; \ + fi + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkconfig.mk \ + headerconfig NODOTCONF=1 + +.PHONY: FORCE +FORCE: diff --git a/mk/rte.sdkdepdirs.mk b/mk/rte.sdkdepdirs.mk new file mode 100644 index 0000000000..bfda0b37ba --- /dev/null +++ b/mk/rte.sdkdepdirs.mk @@ -0,0 +1,65 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq (,$(wildcard $(RTE_OUTPUT)/.config)) + $(error "need a make config first") +endif +ifeq (,$(wildcard $(RTE_OUTPUT)/Makefile)) + $(error "need a make config first") +endif + +# use a "for" in a shell to process dependencies: we don't want this +# task to be run in parallel. +..PHONY: depdirs +depdirs: + @rm -f $(RTE_OUTPUT)/.depdirs ; \ + for d in $(ROOTDIRS-y); do \ + if [ -f $(RTE_SRCDIR)/$$d/Makefile ]; then \ + [ -d $(BUILDDIR)/$$d ] || mkdir -p $(BUILDDIR)/$$d ; \ + $(MAKE) S=$$d -f $(RTE_SRCDIR)/$$d/Makefile depdirs \ + >> $(RTE_OUTPUT)/.depdirs ; \ + fi ; \ + done + +.PHONY: depgraph +depgraph: + @echo "digraph unix {" ; \ + echo " size=\"6,6\";" ; \ + echo " node [color=lightblue2, style=filled];" ; \ + for d in $(ROOTDIRS-y); do \ + echo " \"root\" -> \"$$d\"" ; \ + if [ -f $(RTE_SRCDIR)/$$d/Makefile ]; then \ + $(MAKE) S=$$d -f $(RTE_SRCDIR)/$$d/Makefile depgraph ; \ + fi ; \ + done ; \ + echo "}" diff --git a/mk/rte.sdkdoc.mk b/mk/rte.sdkdoc.mk new file mode 100644 index 0000000000..8d7a296c8a --- /dev/null +++ b/mk/rte.sdkdoc.mk @@ -0,0 +1,73 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
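rte.sdkconfig.mk above turns a config/defconfig_* template (selected with T=) into $(RTE_OUTPUT)/.config, rte_config.h and a generated build-directory Makefile. A typical in-tree session, assuming the x86_64 gcc template shipped with this release and the default ./build output directory:

    make config T=x86_64-default-linuxapp-gcc
    make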
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifdef O +ifeq ("$(origin O)", "command line") +$(error "Cannot use O= with doc target") +endif +endif + +ifdef T +ifeq ("$(origin T)", "command line") +$(error "Cannot use T= with doc target") +endif +endif + +.PHONY: doc +doc: + $(Q)$(MAKE) -C $(RTE_SDK)/doc/images $@ BASEDOCDIR=.. DOCDIR=images + $(Q)$(MAKE) -f $(RTE_SDK)/doc/rst/Makefile -C $(RTE_SDK)/doc/pdf pdfdoc BASEDOCDIR=.. DOCDIR=rst + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk doxydoc + +.PHONY: pdfdoc +pdfdoc: + $(Q)$(MAKE) -C $(RTE_SDK)/doc/images $@ BASEDOCDIR=.. DOCDIR=images + $(Q)$(MAKE) -f $(RTE_SDK)/doc/rst/Makefile -C $(RTE_SDK)/doc/pdf $@ BASEDOCDIR=.. DOCDIR=rst + +.PHONY: doxydoc +doxydoc: + $(Q)$(MAKE) -C $(RTE_SDK)/doc/images $@ BASEDOCDIR=.. DOCDIR=images + $(Q)mkdir -p $(RTE_SDK)/doc/latex + $(Q)mkdir -p $(RTE_SDK)/doc/pdf/api + $(Q)cat $(RTE_SDK)/doc/gen/doxygen_pdf/Doxyfile | doxygen - + $(Q)mv $(RTE_SDK)/doc/images/*.pdf $(RTE_SDK)/doc/latex/ + $(Q)sed -i s/darkgray/headercolour/g $(RTE_SDK)/doc/latex/doxygen.sty + $(Q)cp $(RTE_SDK)/doc/gen/doxygen_pdf/Makefile_doxygen $(RTE_SDK)/doc/latex/Makefile + $(Q)$(MAKE) -C $(RTE_SDK)/doc/latex + $(Q)cp $(RTE_SDK)/doc/latex/refman.pdf $(RTE_SDK)/doc/pdf/api/api.pdf + +.PHONY: docclean +docclean: + $(Q)$(MAKE) -C $(RTE_SDK)/doc/images $@ BASEDOCDIR=.. DOCDIR=images + $(Q)$(MAKE) -f $(RTE_SDK)/doc/rst/Makefile -C $(RTE_SDK)/doc/pdf $@ BASEDOCDIR=.. DOCDIR=rst + $(Q)rm -rf $(RTE_SDK)/doc/pdf/api $(RTE_SDK)/doc/latex diff --git a/mk/rte.sdkgcov.mk b/mk/rte.sdkgcov.mk new file mode 100644 index 0000000000..7ad1e74cd3 --- /dev/null +++ b/mk/rte.sdkgcov.mk @@ -0,0 +1,69 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifdef T +ifeq ("$(origin T)", "command line") +$(error "Cannot use T= with gcov target") +endif +endif + +ifeq (,$(wildcard $(RTE_OUTPUT)/.config)) + $(error "need a make config first") +else + include $(RTE_SDK)/mk/rte.vars.mk +endif +ifeq (,$(wildcard $(RTE_OUTPUT)/Makefile)) + $(error "need a make config first") +endif + +INPUTDIR = $(RTE_OUTPUT) +OUTPUTDIR = $(RTE_OUTPUT)/gcov + +.PHONY: gcovclean +gcovclean: + $(Q)find $(INPUTDIR)/build -name "*.gcno" -o -name "*.gcda" -exec rm {} \; + $(Q)rm -rf $(OUTPUTDIR) + +.PHONY: gcov +gcov: + $(Q)for APP in test ; do \ + mkdir -p $(OUTPUTDIR)/$$APP ; cd $(OUTPUTDIR)/$$APP ; \ + for FIC in `strings $(RTE_OUTPUT)/app/$$APP | grep gcda | sed s,gcda,o,` ; do \ + SUBDIR=`basename $$FIC`;\ + mkdir $$SUBDIR ;\ + cd $$SUBDIR ;\ + $(GCOV) $(RTE_OUTPUT)/app/$$APP -o $$FIC > gcov.log; \ + cd - >/dev/null;\ + done ; \ + cd - >/dev/null; \ + done diff --git a/mk/rte.sdkinstall.mk b/mk/rte.sdkinstall.mk new file mode 100644 index 0000000000..59e34161bc --- /dev/null +++ b/mk/rte.sdkinstall.mk @@ -0,0 +1,76 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifdef O +ifeq ("$(origin O)", "command line") +$(error "Cannot use O= with install target") +endif +endif + +# Targets to install can be specified in command line. It can be a +# target name or a name containing jokers "*". Example: +# x86_64-default-*-gcc +ifndef T +T=* +endif + +# +# install: build sdk for all supported targets +# +INSTALL_CONFIGS := $(patsubst $(RTE_SRCDIR)/config/defconfig_%,%,\ + $(wildcard $(RTE_SRCDIR)/config/defconfig_$(T))) +INSTALL_TARGETS := $(addsuffix _install,\ + $(filter-out %~,$(INSTALL_CONFIGS))) + +.PHONY: install +install: $(INSTALL_TARGETS) + +%_install: + @echo ================== Installing $* + $(Q)$(MAKE) config T=$* O=$* + $(Q)$(MAKE) all O=$* + +# +# uninstall: remove all built sdk +# +UNINSTALL_TARGETS := $(addsuffix _uninstall,\ + $(filter-out %~,$(INSTALL_CONFIGS))) + +.PHONY: uninstall +uninstall: $(UNINSTALL_TARGETS) + +%_uninstall: + @echo ================== Uninstalling $* + $(Q)rm -rf $* + + diff --git a/mk/rte.sdkroot.mk b/mk/rte.sdkroot.mk new file mode 100644 index 0000000000..5b87b68c65 --- /dev/null +++ b/mk/rte.sdkroot.mk @@ -0,0 +1,158 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +MAKEFLAGS += --no-print-directory + +# define Q to '@' or not. $(Q) is used to prefix all shell commands to +# be executed silently. 
+Q=@ +ifdef V +ifeq ("$(origin V)", "command line") +Q= +endif +endif +export Q + +ifeq ($(RTE_SDK),) +$(error RTE_SDK is not defined) +endif + +RTE_SRCDIR = $(CURDIR) +export RTE_SRCDIR + +BUILDING_RTE_SDK := 1 +export BUILDING_RTE_SDK + +# +# We can specify the configuration template when doing the "make +# config". For instance: make config T=i686-default-baremetal-gcc +# +RTE_CONFIG_TEMPLATE := +ifdef T +ifeq ("$(origin T)", "command line") +RTE_CONFIG_TEMPLATE := $(RTE_SRCDIR)/config/defconfig_$(T) +endif +endif +export RTE_CONFIG_TEMPLATE + +# +# Default output is $(RTE_SRCDIR)/build +# output files wil go in a separate directory +# +ifdef O +ifeq ("$(origin O)", "command line") +RTE_OUTPUT := $(abspath $(O)) +endif +endif +RTE_OUTPUT ?= $(RTE_SRCDIR)/build +export RTE_OUTPUT + +# the directory where intermediate build files are stored, like *.o, +# *.d, *.cmd, ... +BUILDDIR = $(RTE_OUTPUT)/build +export BUILDDIR + +export ROOTDIRS-y ROOTDIRS- ROOTDIRS-n + +.PHONY: default +default: all + +.PHONY: config +config: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkconfig.mk config + +.PHONY: test +test: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktest.mk test + +.PHONY: fast_test ring_test mempool_test +fast_test ring_test mempool_test: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktest.mk $@ + +.PHONY: testall +testall: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktestall.mk testall + +.PHONY: testimport +testimport: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktestall.mk testimport + +.PHONY: install +install: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkinstall.mk install + +.PHONY: uninstall +uninstall: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkinstall.mk uninstall + +.PHONY: doc +doc: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk doc + +.PHONY: pdfdoc +pdfdoc: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk pdfdoc + +.PHONY: doxydoc +doxydoc: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk doxydoc + +.PHONY: docclean +docclean: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk docclean + +.PHONY: depdirs +depdirs: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdepdirs.mk depdirs + +.PHONY: depgraph +depgraph: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdepdirs.mk depgraph + +.PHONY: gcovclean +gcovclean: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkgcov.mk gcovclean + +.PHONY: gcov +gcov: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkgcov.mk gcov + +.PHONY: help +help: + @sed -e '1,/.*==================================.*/ d' \ + doc/rst/developers_reference/sdk_mkhelp.rst + +# all other build targets +%: + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkconfig.mk checkconfig + $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkbuild.mk $@ diff --git a/mk/rte.sdktest.mk b/mk/rte.sdktest.mk new file mode 100644 index 0000000000..22ccbe3ed5 --- /dev/null +++ b/mk/rte.sdktest.mk @@ -0,0 +1,66 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq (,$(wildcard $(RTE_OUTPUT)/.config)) + $(error "need a make config first") +else + include $(RTE_SDK)/mk/rte.vars.mk +endif +ifeq (,$(wildcard $(RTE_OUTPUT)/Makefile)) + $(error "need a make config first") +endif + +DATE := $(shell date '+%Y%m%d-%H%M') +AUTOTEST_DIR := $(RTE_OUTPUT)/autotest-$(DATE) + +DIR := $(shell basename $(RTE_OUTPUT)) + +# +# test: launch auto-tests, very simple for now. +# +PHONY: test fast_test + +fast_test: BLACKLIST=-Ring,Mempool +ring_test: WHITELIST=Ring +mempool_test: WHITELIST=Mempool +test fast_test ring_test mempool_test: + @mkdir -p $(AUTOTEST_DIR) ; \ + cd $(AUTOTEST_DIR) ; \ + if [ -f $(RTE_OUTPUT)/app/test ]; then \ + python $(RTE_SDK)/app/test/autotest.py \ + $(RTE_OUTPUT)/app/test \ + $(DIR) $(RTE_TARGET) \ + $(BLACKLIST) $(WHITELIST); \ + else \ + echo "No test found, please do a 'make build' first, or specify O=" ; \ + fi diff --git a/mk/rte.sdktestall.mk b/mk/rte.sdktestall.mk new file mode 100644 index 0000000000..10f10d202b --- /dev/null +++ b/mk/rte.sdktestall.mk @@ -0,0 +1,65 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
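The test targets in rte.sdktest.mk above drive app/test through autotest.py, with fast_test blacklisting the Ring and Mempool suites and ring_test/mempool_test whitelisting one suite each. From a configured and built tree they are invoked as:

    make test            # full autotest run
    make fast_test       # skips the Ring and Mempool suites
    make ring_test       # Ring suite only
    make mempool_test    # Mempool suite only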
+# +# version: DPDK.L.1.2.3-3 + +ifdef O +ifeq ("$(origin O)", "command line") +$(error "Cannot use O= with testall target") +endif +endif + +# Targets to test can be specified in command line. It can be a +# target name or a name containing jokers "*". Example: +# x86_64-default-*-gcc +ifndef T +T=* +endif + +# +# testall: launch test for all supported targets +# +TESTALL_CONFIGS := $(patsubst $(RTE_SRCDIR)/config/defconfig_%,%,\ + $(wildcard $(RTE_SRCDIR)/config/defconfig_$(T))) +TESTALL_TARGETS := $(addsuffix _testall,\ + $(filter-out %~,$(TESTALL_CONFIGS))) +.PHONY: testall +testall: $(TESTALL_TARGETS) + +%_testall: + @echo ================== Test $* + $(Q)$(MAKE) test O=$* + +# +# import autotests in documentation +# +testimport: + $(Q)$(RTE_SDK)/scripts/import_autotest.sh $(TESTALL_CONFIGS) diff --git a/mk/rte.subdir.mk b/mk/rte.subdir.mk new file mode 100644 index 0000000000..f0ae3fbbae --- /dev/null +++ b/mk/rte.subdir.mk @@ -0,0 +1,114 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +# +# .mk to build subdirectories +# + +include $(RTE_SDK)/mk/internal/rte.install-pre.mk +include $(RTE_SDK)/mk/internal/rte.clean-pre.mk +include $(RTE_SDK)/mk/internal/rte.build-pre.mk + +CLEANDIRS = $(addsuffix _clean,$(DIRS-y) $(DIRS-n) $(DIRS-)) + +VPATH += $(SRCDIR) +_BUILD = $(DIRS-y) +_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) +_CLEAN = $(CLEANDIRS) + +.PHONY: all +all: install + +.PHONY: install +install: build _postinstall + +_postinstall: build + +.PHONY: build +build: _postbuild + +.SECONDEXPANSION: +.PHONY: $(DIRS-y) +$(DIRS-y): + @[ -d $(CURDIR)/$@ ] || mkdir -p $(CURDIR)/$@ + @echo "== Build $S/$@" + @$(MAKE) S=$S/$@ -f $(SRCDIR)/$@/Makefile -C $(CURDIR)/$@ all + +.PHONY: clean +clean: _postclean + +%_clean: + @echo "== Clean $S/$*" + @if [ -f $(SRCDIR)/$*/Makefile -a -d $(CURDIR)/$* ]; then \ + $(MAKE) S=$S/$* -f $(SRCDIR)/$*/Makefile -C $(CURDIR)/$* clean ; \ + fi + @rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) + +# +# include .depdirs and define rules to order priorities between build +# of directories. +# +include $(RTE_OUTPUT)/.depdirs + +define depdirs_rule +$(1): $(sort $(patsubst $(S)/%,%,$(LOCAL_DEPDIRS-$(S)/$(1)))) +endef + +$(foreach d,$(DIRS-y),$(eval $(call depdirs_rule,$(d)))) + + +# use a "for" in a shell to process dependencies: we don't want this +# task to be run in parallel. +.PHONY: depdirs +depdirs: + @for d in $(DIRS-y); do \ + if [ -f $(SRCDIR)/$$d/Makefile ]; then \ + $(MAKE) S=$S/$$d -f $(SRCDIR)/$$d/Makefile depdirs ; \ + fi ; \ + done + +.PHONY: depgraph +depgraph: + @for d in $(DIRS-y); do \ + echo " \"$(S)\" -> \"$(S)/$$d\"" ; \ + if [ -f $(SRCDIR)/$$d/Makefile ]; then \ + $(MAKE) S=$S/$$d -f $(SRCDIR)/$$d/Makefile depgraph ; \ + fi ; \ + done + +include $(RTE_SDK)/mk/internal/rte.install-post.mk +include $(RTE_SDK)/mk/internal/rte.clean-post.mk +include $(RTE_SDK)/mk/internal/rte.build-post.mk + +.PHONY: FORCE +FORCE: diff --git a/mk/rte.vars.mk b/mk/rte.vars.mk new file mode 100644 index 0000000000..c56ee4ee1b --- /dev/null +++ b/mk/rte.vars.mk @@ -0,0 +1,125 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# To be included at the beginning of all RTE user Makefiles. This +# .mk will define the RTE environment variables by including the +# config file of the SDK. It also includes the config file from the external +# application, if any. +# + +ifeq ($(RTE_SDK),) +$(error RTE_SDK is not defined) +endif +ifeq ($(wildcard $(RTE_SDK)),) +$(error RTE_SDK variable points to an invalid location) +endif + +# define Q to '@' or not. $(Q) is used to prefix all shell commands to +# be executed silently. +Q=@ +ifdef V +ifeq ("$(origin V)", "command line") +Q= +endif +endif +export Q + +# if we are building the SDK, only include the SDK configuration +ifneq ($(BUILDING_RTE_SDK),) + include $(RTE_OUTPUT)/.config + # remove double-quotes from config names + RTE_ARCH := $(CONFIG_RTE_ARCH:"%"=%) + RTE_MACHINE := $(CONFIG_RTE_MACHINE:"%"=%) + RTE_EXEC_ENV := $(CONFIG_RTE_EXEC_ENV:"%"=%) + RTE_TOOLCHAIN := $(CONFIG_RTE_TOOLCHAIN:"%"=%) + RTE_TARGET := $(RTE_ARCH)-$(RTE_MACHINE)-$(RTE_EXEC_ENV)-$(RTE_TOOLCHAIN) + RTE_SDK_BIN := $(RTE_OUTPUT) +endif + +# RTE_TARGET is deduced from the config when we are building the SDK. +# Else, when building an external app, RTE_TARGET must be specified +# by the user. +ifeq ($(RTE_TARGET),) +$(error RTE_TARGET is not defined) +endif + +ifeq ($(BUILDING_RTE_SDK),) +# if we are building an external app/lib, include rte.extvars.mk that will +# define RTE_OUTPUT, RTE_SRCDIR, RTE_EXTMK, RTE_SDK_BIN, (etc ...) +include $(RTE_SDK)/mk/rte.extvars.mk +endif + +ifeq ($(RTE_ARCH),) +$(error RTE_ARCH is not defined) +endif + +ifeq ($(RTE_MACHINE),) +$(error RTE_MACHINE is not defined) +endif + +ifeq ($(RTE_EXEC_ENV),) +$(error RTE_EXEC_ENV is not defined) +endif + +ifeq ($(RTE_TOOLCHAIN),) +$(error RTE_TOOLCHAIN is not defined) +endif + +# can be overridden by make command line or exported environment variable +RTE_KERNELDIR ?= /lib/modules/$(shell uname -r)/build + +export RTE_TARGET +export RTE_ARCH +export RTE_MACHINE +export RTE_EXEC_ENV +export RTE_TOOLCHAIN + +# SRCDIR is the current source directory +ifdef S +SRCDIR := $(abspath $(RTE_SRCDIR)/$(S)) +else +SRCDIR := $(RTE_SRCDIR) +endif + +# helper: return y if option is set to y, else return an empty string +testopt = $(if $(strip $(subst y,,$(1)) $(subst $(1),,y)),,y) + +# helper: return an empty string if option is set, else return y +not = $(if $(strip $(subst y,,$(1)) $(subst $(1),,y)),,y) + +ifneq ($(wildcard $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.vars.mk),) +include $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.vars.mk +else +include $(RTE_SDK)/mk/target/generic/rte.vars.mk +endif diff --git a/mk/target/generic/rte.app.mk b/mk/target/generic/rte.app.mk new file mode 100644 index 0000000000..e1f3b66651 --- /dev/null +++ b/mk/target/generic/rte.app.mk @@ -0,0 +1,43 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved.
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# define Makefile targets that are specific to an environment. +# +include $(RTE_SDK)/mk/exec-env/$(RTE_EXEC_ENV)/rte.app.mk + +.PHONY: exec-env-appinstall +target-appinstall: exec-env-appinstall + +.PHONY: exec-env-appclean +target-appclean: exec-env-appclean diff --git a/mk/target/generic/rte.vars.mk b/mk/target/generic/rte.vars.mk new file mode 100644 index 0000000000..343c5a4b70 --- /dev/null +++ b/mk/target/generic/rte.vars.mk @@ -0,0 +1,150 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# This .mk is the generic target rte.var.mk ; it includes .mk for +# the specified machine, architecture, toolchain (compiler) and +# executive environment. +# + +# +# machine: +# +# - can define ARCH variable (overriden by cmdline value) +# - can define CROSS variable (overriden by cmdline value) +# - define MACHINE_CFLAGS variable (overriden by cmdline value) +# - define MACHINE_LDFLAGS variable (overriden by cmdline value) +# - define MACHINE_ASFLAGS variable (overriden by cmdline value) +# - can define CPU_CFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_LDFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# - can define CPU_ASFLAGS variable (overriden by cmdline value) that +# overrides the one defined in arch. +# +# examples for RTE_MACHINE: default, pc, bensley, tylesburg, ... +# +include $(RTE_SDK)/mk/machine/$(RTE_MACHINE)/rte.vars.mk + +# +# arch: +# +# - define ARCH variable (overriden by cmdline or by previous +# optional define in machine .mk) +# - define CROSS variable (overriden by cmdline or previous define +# in machine .mk) +# - define CPU_CFLAGS variable (overriden by cmdline or previous +# define in machine .mk) +# - define CPU_LDFLAGS variable (overriden by cmdline or previous +# define in machine .mk) +# - define CPU_ASFLAGS variable (overriden by cmdline or previous +# define in machine .mk) +# - may override any previously defined variable +# +# examples for RTE_ARCH: i686, x86_64 +# +include $(RTE_SDK)/mk/arch/$(RTE_ARCH)/rte.vars.mk + +# +# toolchain: +# +# - define CC, LD, AR, AS, ... +# - define TOOLCHAIN_CFLAGS variable (overriden by cmdline value) +# - define TOOLCHAIN_LDFLAGS variable (overriden by cmdline value) +# - define TOOLCHAIN_ASFLAGS variable (overriden by cmdline value) +# - may override any previously defined variable +# +# examples for RTE_TOOLCHAIN: gcc, icc +# +include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.vars.mk + +# +# exec-env: +# +# - define EXECENV_CFLAGS variable (overriden by cmdline) +# - define EXECENV_LDFLAGS variable (overriden by cmdline) +# - define EXECENV_ASFLAGS variable (overriden by cmdline) +# - may override any previously defined variable +# +# examples for RTE_EXEC_ENV: linuxapp, baremetal +# +include $(RTE_SDK)/mk/exec-env/$(RTE_EXEC_ENV)/rte.vars.mk + +# Don't set CFLAGS/LDFLAGS flags for kernel module, all flags are +# provided by Kbuild framework. 
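+# +# (Illustrative note, not part of the original file: when not going through +# Kbuild, the block below assembles the final flags roughly as +# CFLAGS = CPU_CFLAGS EXECENV_CFLAGS TOOLCHAIN_CFLAGS MACHINE_CFLAGS TARGET_CFLAGS +# followed by -I$(RTE_OUTPUT)/include and a forced -include of rte_config.h, +# with LDFLAGS and ASFLAGS built the same way from the matching *_LDFLAGS +# and *_ASFLAGS variables.)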
+ifeq ($(KERNELRELEASE),) + +# merge all CFLAGS +CFLAGS := $(CPU_CFLAGS) $(EXECENV_CFLAGS) $(TOOLCHAIN_CFLAGS) $(MACHINE_CFLAGS) +CFLAGS += $(TARGET_CFLAGS) + +# merge all LDFLAGS +LDFLAGS := $(CPU_LDFLAGS) $(EXECENV_LDFLAGS) $(TOOLCHAIN_LDFLAGS) $(MACHINE_LDFLAGS) +LDFLAGS += $(TARGET_LDFLAGS) + +# merge all ASFLAGS +ASFLAGS := $(CPU_ASFLAGS) $(EXECENV_ASFLAGS) $(TOOLCHAIN_ASFLAGS) $(MACHINE_ASFLAGS) +ASFLAGS += $(TARGET_ASFLAGS) + +# add default include and lib paths +CFLAGS += -I$(RTE_OUTPUT)/include +LDFLAGS += -L$(RTE_OUTPUT)/lib + +# always include rte_config.h: the one in $(RTE_OUTPUT)/include is +# the configuration of SDK when $(BUILDING_RTE_SDK) is true, or the +# configuration of the application if $(BUILDING_RTE_SDK) is not +# defined. +ifeq ($(BUILDING_RTE_SDK),1) +# building sdk +CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h +ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y) +CFLAGS += -include $(RTE_OUTPUT)/include/rte_warnings.h +endif +else +# if we are building an external application, include SDK's lib and +# includes too +CFLAGS += -I$(RTE_SDK_BIN)/include +ifneq ($(wildcard $(RTE_OUTPUT)/include/rte_config.h),) +CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h +endif +CFLAGS += -include $(RTE_SDK_BIN)/include/rte_config.h +ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y) +CFLAGS += -include $(RTE_SDK_BIN)/include/rte_warnings.h +endif +LDFLAGS += -L$(RTE_SDK_BIN)/lib +endif + +export CFLAGS +export LDFLAGS + +endif diff --git a/mk/toolchain/gcc/rte.toolchain-compat.mk b/mk/toolchain/gcc/rte.toolchain-compat.mk new file mode 100644 index 0000000000..4e65122867 --- /dev/null +++ b/mk/toolchain/gcc/rte.toolchain-compat.mk @@ -0,0 +1,93 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# CPUID-related options +# +# This was added to support compiler versions which might not support all the +# flags we need +# + +#find out GCC version + +GCC_MAJOR_VERSION = $(shell gcc -dumpversion | cut -f1 -d.) 
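+# +# (Illustrative example, not part of the original file: on a host with +# gcc 4.4, "gcc -dumpversion" prints something like "4.4.7", so +# GCC_MAJOR_VERSION is 4 and GCC_MINOR_VERSION is 4; the substitutions +# below would then rewrite e.g. "-march=corei7" into +# "-march=core2 -maes -mpclmul", since corei7 support only appeared in +# gcc 4.6, as noted in the version list below.)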
+ +# if GCC is not 4.x +ifneq ($(GCC_MAJOR_VERSION),4) + MACHINE_CFLAGS = +$(warning You are not using GCC 4.x. This is neither supported, nor tested.) + + +else + GCC_MINOR_VERSION = $(shell gcc -dumpversion | cut -f2 -d.) + +# GCC graceful degradation +# GCC 4.2.x - added support for generic target +# GCC 4.3.x - added support for core2, ssse3, sse4.1, sse4.2 +# GCC 4.4.x - added support for avx, aes, pclmul +# GCC 4.5.x - added support for atom +# GCC 4.6.x - added support for corei7, corei7-avx +# GCC 4.7.x - added support for fsgsbase, rdrnd, f16c, core-avx-i, core-avx2 + + ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 7 && echo 1), 1) + MACHINE_CFLAGS := $(patsubst -march=core-avx-i,-march=corei7-avx,$(MACHINE_CFLAGS)) + MACHINE_CFLAGS := $(patsubst -march=core-avx2,-march=corei7-avx,$(MACHINE_CFLAGS)) + endif + ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 6 && echo 1), 1) + MACHINE_CFLAGS := $(patsubst -march=corei7-avx,-march=core2 -maes -mpclmul -mavx,$(MACHINE_CFLAGS)) + MACHINE_CFLAGS := $(patsubst -march=corei7,-march=core2 -maes -mpclmul,$(MACHINE_CFLAGS)) + endif + ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 5 && echo 1), 1) + MACHINE_CFLAGS := $(patsubst -march=atom,-march=core2 -mssse3,$(MACHINE_CFLAGS)) + endif + ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 4 && echo 1), 1) + MACHINE_CFLAGS := $(filter-out -mavx -mpclmul -maes,$(MACHINE_CFLAGS)) + endif + ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 3 && echo 1), 1) + MACHINE_CFLAGS := $(filter-out -msse% -mssse%,$(MACHINE_CFLAGS)) + MACHINE_CFLAGS := $(patsubst -march=core2,-march=generic,$(MACHINE_CFLAGS)) + MACHINE_CFLAGS += -msse3 + endif + ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 2 && echo 1), 1) + MACHINE_CFLAGS := $(filter-out -march% -mtune% -msse%,$(MACHINE_CFLAGS)) + endif +endif +MACHINE_CFLAGS += $(addprefix -DRTE_MACHINE_CPUFLAG_,$(CPUFLAGS)) + +# To strip whitespace +comma:= , +empty:= +space:= $(empty) $(empty) +CPUFLAGSTMP1 := $(addprefix RTE_CPUFLAG_,$(CPUFLAGS)) +CPUFLAGSTMP2 := $(subst $(space),$(comma),$(CPUFLAGSTMP1)) +MACHINE_CFLAGS += -DRTE_COMPILE_TIME_CPUFLAGS=$(CPUFLAGSTMP2) diff --git a/mk/toolchain/gcc/rte.vars.mk b/mk/toolchain/gcc/rte.vars.mk new file mode 100644 index 0000000000..d640515d95 --- /dev/null +++ b/mk/toolchain/gcc/rte.vars.mk @@ -0,0 +1,87 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# toolchain: +# +# - define CC, LD, AR, AS, ... (overridden by cmdline value) +# - define TOOLCHAIN_CFLAGS variable (overridden by cmdline value) +# - define TOOLCHAIN_LDFLAGS variable (overridden by cmdline value) +# - define TOOLCHAIN_ASFLAGS variable (overridden by cmdline value) +# +# examples for RTE_TOOLCHAIN: gcc, icc +# + +CC = $(CROSS)gcc +CPP = $(CROSS)cpp +# for now, we don't use as but nasm. +# AS = $(CROSS)as +AS = nasm +AR = $(CROSS)ar +LD = $(CROSS)ld +OBJCOPY = $(CROSS)objcopy +OBJDUMP = $(CROSS)objdump +STRIP = $(CROSS)strip +READELF = $(CROSS)readelf +GCOV = $(CROSS)gcov + +HOSTCC = gcc +HOSTAS = as + +TOOLCHAIN_ASFLAGS = +TOOLCHAIN_CFLAGS = +TOOLCHAIN_LDFLAGS = + +ifeq ($(CONFIG_RTE_LIBRTE_GCOV),y) +TOOLCHAIN_CFLAGS += --coverage +TOOLCHAIN_LDFLAGS += --coverage +ifeq (,$(findstring -O0,$(EXTRA_CFLAGS))) + $(warning "EXTRA_CFLAGS doesn't contain -O0, coverage will be inaccurate with optimizations enabled") +endif +endif + +WERROR_FLAGS := -W -Wall -Werror -Wstrict-prototypes -Wmissing-prototypes +WERROR_FLAGS += -Wmissing-declarations -Wold-style-definition -Wpointer-arith +WERROR_FLAGS += -Wcast-align -Wnested-externs -Wcast-qual +WERROR_FLAGS += -Wformat-nonliteral -Wformat-security + +ifeq ($(CONFIG_RTE_EXEC_ENV),"linuxapp") +# These trigger warnings in newlib, so can't be used for baremetal +WERROR_FLAGS += -Wundef -Wwrite-strings +endif + +# process cpu flags +include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.toolchain-compat.mk + +export CC AS AR LD OBJCOPY OBJDUMP STRIP READELF +export TOOLCHAIN_CFLAGS TOOLCHAIN_LDFLAGS TOOLCHAIN_ASFLAGS diff --git a/mk/toolchain/icc/rte.toolchain-compat.mk b/mk/toolchain/icc/rte.toolchain-compat.mk new file mode 100644 index 0000000000..5540f86788 --- /dev/null +++ b/mk/toolchain/icc/rte.toolchain-compat.mk @@ -0,0 +1,82 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# CPUID-related options +# +# This was added to support compiler versions which might not support all the +# flags we need +# + +# find out ICC version + +ICC_MAJOR_VERSION = $(shell icc -dumpversion | cut -f1 -d.) + +ifneq ($(ICC_MAJOR_VERSION),12) + MACHINE_CFLAGS = -xSSE3 +$(warning You are not using ICC 12.x. This is neither supported, nor tested.) + +else +# proceed to adjust compiler flags + + ICC_MINOR_VERSION = $(shell icc -dumpversion | cut -f2 -d.) + +# replace GCC flags with ICC flags + ifeq ($(shell test $(ICC_MINOR_VERSION) -lt 2 && echo 1), 1) + # Atom + MACHINE_CFLAGS := $(patsubst -march=atom,-xSSSE3_ATOM -march=atom,$(MACHINE_CFLAGS)) + # nehalem/westmere + MACHINE_CFLAGS := $(patsubst -march=corei7,-xSSE4.2 -march=corei7,$(MACHINE_CFLAGS)) + # sandy bridge + MACHINE_CFLAGS := $(patsubst -march=corei7-avx,-xAVX,$(MACHINE_CFLAGS)) + # ivy bridge + MACHINE_CFLAGS := $(patsubst -march=core-avx-i,-xCORE-AVX-I,$(MACHINE_CFLAGS)) + # remove westmere flags + MACHINE_CFLAGS := $(filter-out -mpclmul -maes,$(MACHINE_CFLAGS)) + endif + ifeq ($(shell test $(ICC_MINOR_VERSION) -lt 1 && echo 1), 1) + # Atom + MACHINE_CFLAGS := $(patsubst -xSSSE3_ATOM,-xSSE3_ATOM,$(MACHINE_CFLAGS)) + # remove march options + MACHINE_CFLAGS := $(patsubst -march=%,-xSSE3,$(MACHINE_CFLAGS)) + endif +endif +MACHINE_CFLAGS += $(addprefix -DRTE_MACHINE_CPUFLAG_,$(CPUFLAGS)) + +# To strip whitespace +comma:= , +empty:= +space:= $(empty) $(empty) +CPUFLAGSTMP1 := $(addprefix RTE_CPUFLAG_,$(CPUFLAGS)) +CPUFLAGSTMP2 := $(subst $(space),$(comma),$(CPUFLAGSTMP1)) +MACHINE_CFLAGS += -DRTE_COMPILE_TIME_CPUFLAGS=$(CPUFLAGSTMP2) diff --git a/mk/toolchain/icc/rte.vars.mk b/mk/toolchain/icc/rte.vars.mk new file mode 100644 index 0000000000..5eca8ac1a9 --- /dev/null +++ b/mk/toolchain/icc/rte.vars.mk @@ -0,0 +1,98 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# toolchain: +# +# - define CC, LD, AR, AS, ... (overriden by cmdline value) +# - define TOOLCHAIN_CFLAGS variable (overriden by cmdline value) +# - define TOOLCHAIN_LDFLAGS variable (overriden by cmdline value) +# - define TOOLCHAIN_ASFLAGS variable (overriden by cmdline value) +# +# examples for RTE_TOOLCHAIN: gcc, icc +# + +# Warning: we do not use CROSS environment variable as icc is mainly a +# x86->x86 compiler + +ifeq ($(KERNELRELEASE),) +CC = icc +else +CC = gcc +endif +CPP = cpp +AS = nasm +AR = ar +LD = ld +OBJCOPY = objcopy +OBJDUMP = objdump +STRIP = strip +READELF = readelf + +ifeq ($(KERNELRELEASE),) +HOSTCC = icc +else +HOSTCC = gcc +endif +HOSTAS = as + +TOOLCHAIN_CFLAGS = +TOOLCHAIN_LDFLAGS = +TOOLCHAIN_ASFLAGS = + +# Turn off some ICC warnings - +# Remark #271 : trailing comma is nonstandard +# Warning #1478 : function "" (declared at line N of "") +# was declared "deprecated" +ifeq ($(CONFIG_RTE_EXEC_ENV),"linuxapp") +WERROR_FLAGS := -Wall -Werror-all -w2 -diag-disable 271 -diag-warning 1478 +else + +# Turn off some ICC warnings - +# Remark #193 : zero used for undefined preprocessing identifier +# (needed for newlib) +# Remark #271 : trailing comma is nonstandard +# Remark #1292 : attribute "warning" ignored ((warning ("the use of +# `mktemp' is dangerous; use `mkstemp' instead")))); +# (needed for newlib) +# Warning #1478 : function "" (declared at line N of "") +# was declared "deprecated" +WERROR_FLAGS := -Wall -Werror-all -w2 -diag-disable 193,271,1292 \ + -diag-warning 1478 +endif + +# process cpu flags +include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.toolchain-compat.mk + +export CC AS AR LD OBJCOPY OBJDUMP STRIP READELF +export TOOLCHAIN_CFLAGS TOOLCHAIN_LDFLAGS TOOLCHAIN_ASFLAGS diff --git a/scripts/Makefile b/scripts/Makefile new file mode 100644 index 0000000000..8557253fcd --- /dev/null +++ b/scripts/Makefile @@ -0,0 +1,38 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +DIRS-y += testhost + +include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/scripts/depdirs-rule.sh b/scripts/depdirs-rule.sh new file mode 100755 index 0000000000..3b0ea56070 --- /dev/null +++ b/scripts/depdirs-rule.sh @@ -0,0 +1,97 @@ +#!/bin/sh + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# This (obscure) bash script finds the smallest different path between +# path1 and path2 given as command line argument. The given paths MUST +# be relative paths, the script is not designed to work with absolute +# paths. +# +# The script will then generate Makefile code that can be saved in a +# file and included in build system. +# +# For instance: +# depdirs-rule.sh a/b/c/d a/b/e/f +# Will print: +# FULL_DEPDIRS-a/b/c/d += a/b/e/f +# LOCAL_DEPDIRS-a/b/c += a/b/e +# +# The script returns 0 except if invalid arguments are given. 
+# + +if [ $# -ne 2 ]; then + echo "Bad arguments" + echo "Usage:" + echo " $0 path1 path2" + exit 1 +fi + +left1=${1%%/*} +right1=${1#*/} +prev_right1=$1 +prev_left1= + +left2=${2%%/*} +right2=${2#*/} +prev_right2=$2 +prev_left2= + +while [ "${right1}" != "" -a "${right2}" != "" ]; do + + if [ "$left1" != "$left2" ]; then + break + fi + + prev_left1=$left1 + left1=$left1/${right1%%/*} + prev_right1=$right1 + right1=${prev_right1#*/} + if [ "$right1" = "$prev_right1" ]; then + right1="" + fi + + prev_left2=$left2 + left2=$left2/${right2%%/*} + prev_right2=$right2 + right2=${prev_right2#*/} + if [ "$right2" = "$prev_right2" ]; then + right2="" + fi +done + +echo FULL_DEPDIRS-$1 += $2 +echo LOCAL_DEPDIRS-$left1 += $left2 + +exit 0 diff --git a/scripts/gen-build-mk.sh b/scripts/gen-build-mk.sh new file mode 100755 index 0000000000..d7732104c4 --- /dev/null +++ b/scripts/gen-build-mk.sh @@ -0,0 +1,55 @@ +#!/bin/sh + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# Auto-generate a Makefile in build directory +# Args: +# $1: path of project src root +# $2: path of build dir (can be relative to $1) + +echo "# Automatically generated by gen-build-mk.sh" +echo +echo "ifdef O" +echo "ifeq (\"\$(origin O)\", \"command line\")" +echo "\$(error \"Cannot specify O= as you are already in a build directory\")" +echo "endif" +echo "endif" +echo +echo "MAKEFLAGS += --no-print-directory" +echo +echo "all:" +echo " @\$(MAKE) -C $1 O=$2" +echo +echo "%::" +echo " @\$(MAKE) -C $1 O=$2 \$@" diff --git a/scripts/gen-config-h.sh b/scripts/gen-config-h.sh new file mode 100755 index 0000000000..4d15e6f561 --- /dev/null +++ b/scripts/gen-config-h.sh @@ -0,0 +1,41 @@ +#!/bin/sh + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +grep CONFIG_ $1 \ +| grep -v '^#' \ +| sed 's,CONFIG_\(.*\)=y.*$,#define \1 1,' \ +| sed 's,CONFIG_\(.*\)=n.*$,#undef \1,' \ +| sed 's,CONFIG_\(.*\)=\(.*\)$,#define \1 \2,' \ +| sed 's,\# CONFIG_\(.*\) is not set$,#undef \1,' diff --git a/scripts/import_autotest.sh b/scripts/import_autotest.sh new file mode 100755 index 0000000000..3b3767bd65 --- /dev/null +++ b/scripts/import_autotest.sh @@ -0,0 +1,87 @@ +#!/bin/sh + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +# +# import autotests in documentation +# called by rte.sdktestall.mk from RTE_SDK root directory +# arguments are the list of targets +# + +echo "This will overwrite current autotest results in doc/rst/test_report" +echo "and in doc/images/ directories" +echo -n "Are you sure ? [y/N] >" +read ans +if [ "$ans" != "y" -a "$ans" != "Y" ]; then + echo "Aborted" + exit 0 +fi + +rm doc/images/autotests/Makefile + +for t in $*; do + echo -------- $t + rm -rf doc/rst/test_report/autotests/$t + + # no autotest dir, skip + if ! ls -d $t/autotest-*/*.rst 2> /dev/null > /dev/null; then + continue; + fi + + for f in $t/autotest*/*.rst; do + if [ ! -f $f ]; then + continue + fi + mkdir -p doc/rst/test_report/autotests/$t + cp $f doc/rst/test_report/autotests/$t + done + rm -rf doc/images/autotests/$t + for f in $t/autotest*/*.svg; do + if [ ! -f $f ]; then + continue + fi + mkdir -p doc/images/autotests/$t + cp $f doc/images/autotests/$t + echo "SVG += `basename $f`" >> doc/images/autotests/$t/Makefile + done + + if [ -f doc/images/autotests/$t/Makefile ]; then + echo >> doc/images/autotests/$t/Makefile + echo 'include $(RTE_SDK)/mk/rte.doc.mk' >> doc/images/autotests/$t/Makefile + fi + + echo "DIRS += $t" >> doc/images/autotests/Makefile +done + +echo 'include $(RTE_SDK)/mk/rte.doc.mk' >> doc/images/autotests/Makefile diff --git a/scripts/relpath.sh b/scripts/relpath.sh new file mode 100755 index 0000000000..9a3440bce1 --- /dev/null +++ b/scripts/relpath.sh @@ -0,0 +1,100 @@ +#!/bin/sh + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +# +# print the relative path of $1 from $2 directory +# $1 and $2 MUST be absolute paths +# + +if [ $# -ne 2 ]; then + echo "Bad arguments" + echo "Usage:" + echo " $0 path1 path2" + exit 1 +fi + +REL1=${1#/} +REL2=${2#/} + +left1=${REL1%%/*} +right1=${REL1#*/} +prev_right1=$REL1 +prev_left1= + +left2=${REL2%%/*} +right2=${REL2#*/} +prev_right2=$REL2 +prev_left2= + +while [ "${right1}" != "" -a "${right2}" != "" ]; do + + if [ "$left1" != "$left2" ]; then + break + fi + + prev_left1=$left1 + left1=$left1/${right1%%/*} + prev_right1=$right1 + right1=${prev_right1#*/} + if [ "$right1" = "$prev_right1" ]; then + right1="" + fi + + prev_left2=$left2 + left2=$left2/${right2%%/*} + prev_right2=$right2 + right2=${prev_right2#*/} + if [ "$right2" = "$prev_right2" ]; then + right2="" + fi +done + +if [ "${left1}" != "${left2}" ]; then + right2=${prev_right2} + right1=${prev_right1} +fi + +while [ "${right2}" != "" ]; do + prefix=${prefix}../ + prev_right2=$right2 + right2=${right2#*/} + if [ "$right2" = "$prev_right2" ]; then + right2="" + fi +done + +echo ${prefix}${right1} + +exit 0 diff --git a/scripts/test-framework.sh b/scripts/test-framework.sh new file mode 100755 index 0000000000..56cb457aee --- /dev/null +++ b/scripts/test-framework.sh @@ -0,0 +1,133 @@ +#!/bin/sh + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# +# version: DPDK.L.1.2.3-3 + +# script to check that dependancies are working in the framework +# must be executed from root + +# do a first build +make config T=x86_64-default-linuxapp-gcc O=deptest +make -j8 O=deptest + +MOD_APP_TEST1=`stat deptest/app/test | grep Modify` +MOD_APP_TEST_MEMPOOL1=`stat deptest/build/app/test/test_mempool.o | grep Modify` +MOD_LIB_MEMPOOL1=`stat deptest/lib/librte_mempool.a | grep Modify` +MOD_LIB_MBUF1=`stat deptest/lib/librte_mbuf.a | grep Modify` + +echo "----- touch mempool.c, and check that deps are updated" +sleep 1 +touch lib/librte_mempool/rte_mempool.c +make -j8 O=deptest + +MOD_APP_TEST2=`stat deptest/app/test | grep Modify` +MOD_APP_TEST_MEMPOOL2=`stat deptest/build/app/test/test_mempool.o | grep Modify` +MOD_LIB_MEMPOOL2=`stat deptest/lib/librte_mempool.a | grep Modify` +MOD_LIB_MBUF2=`stat deptest/lib/librte_mbuf.a | grep Modify` + +if [ "${MOD_APP_TEST1}" = "${MOD_APP_TEST2}" ]; then + echo ${MOD_APP_TEST1} / ${MOD_APP_TEST2} + echo "Bad deps on deptest/app/test" + exit 1 +fi +if [ "${MOD_APP_TEST_MEMPOOL1}" != "${MOD_APP_TEST_MEMPOOL2}" ]; then + echo "Bad deps on deptest/build/app/test/test_mempool.o" + exit 1 +fi +if [ "${MOD_LIB_MEMPOOL1}" = "${MOD_LIB_MEMPOOL2}" ]; then + echo "Bad deps on deptest/lib/librte_mempool.a" + exit 1 +fi +if [ "${MOD_LIB_MBUF1}" != "${MOD_LIB_MBUF2}" ]; then + echo "Bad deps on deptest/lib/librte_mbuf.a" + exit 1 +fi + +echo "----- touch mempool.h, and check that deps are updated" +sleep 1 +touch lib/librte_mempool/rte_mempool.h +make -j8 O=deptest + +MOD_APP_TEST3=`stat deptest/app/test | grep Modify` +MOD_APP_TEST_MEMPOOL3=`stat deptest/build/app/test/test_mempool.o | grep Modify` +MOD_LIB_MEMPOOL3=`stat deptest/lib/librte_mempool.a | grep Modify` +MOD_LIB_MBUF3=`stat deptest/lib/librte_mbuf.a | grep Modify` + +if [ "${MOD_APP_TEST2}" = "${MOD_APP_TEST3}" ]; then + echo "Bad deps on deptest/app/test" + exit 1 +fi +if [ "${MOD_APP_TEST_MEMPOOL2}" = "${MOD_APP_TEST_MEMPOOL3}" ]; then + echo "Bad deps on deptest/build/app/test/test_mempool.o" + exit 1 +fi +if [ "${MOD_LIB_MEMPOOL2}" = "${MOD_LIB_MEMPOOL3}" ]; then + echo "Bad deps on deptest/lib/librte_mempool.a" + exit 1 +fi +if [ "${MOD_LIB_MBUF2}" = "${MOD_LIB_MBUF3}" ]; then + echo "Bad deps on deptest/lib/librte_mbuf.a" + exit 1 +fi + + +echo "----- change mempool.c's CFLAGS, and check that deps are updated" +sleep 1 +make -j8 O=deptest CFLAGS_rte_mempool.o="-DDUMMY_TEST" + +MOD_APP_TEST4=`stat deptest/app/test | grep Modify` +MOD_APP_TEST_MEMPOOL4=`stat deptest/build/app/test/test_mempool.o | grep Modify` +MOD_LIB_MEMPOOL4=`stat deptest/lib/librte_mempool.a | grep Modify` +MOD_LIB_MBUF4=`stat deptest/lib/librte_mbuf.a | grep Modify` + +if [ "${MOD_APP_TEST3}" = "${MOD_APP_TEST4}" ]; then + echo "Bad deps on deptest/app/test" + exit 1 +fi +if [ "${MOD_APP_TEST_MEMPOOL3}" != "${MOD_APP_TEST_MEMPOOL4}" ]; then + echo "Bad deps on deptest/build/app/test/test_mempool.o" + exit 1 +fi +if [ "${MOD_LIB_MEMPOOL3}" = "${MOD_LIB_MEMPOOL4}" ]; then + echo "Bad deps on deptest/lib/librte_mempool.a" + exit 1 +fi +if [ "${MOD_LIB_MBUF3}" != "${MOD_LIB_MBUF4}" ]; then + echo "Bad deps on deptest/lib/librte_mbuf.a" + exit 1 +fi + + +echo "----- Deps check ok" +rm -rf deptest +exit 0 diff --git a/scripts/testhost/Makefile b/scripts/testhost/Makefile new file mode 100644 index 0000000000..c2ac583c33 --- /dev/null +++ b/scripts/testhost/Makefile @@ -0,0 +1,50 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +HOSTAPP = testhost + +HOST_CFLAGS += -I$(SRCDIR) + +# HOST_LDFLAGS += + +# +# all source are stored in SRCS-y +# +SRCS-y := testhost.c + +include $(RTE_SDK)/mk/rte.hostapp.mk diff --git a/scripts/testhost/testhost.c b/scripts/testhost/testhost.c new file mode 100644 index 0000000000..bf268226b2 --- /dev/null +++ b/scripts/testhost/testhost.c @@ -0,0 +1,57 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include <stdio.h> + +struct toto { + int x; + int y; +}; + +int main(int argc, char **argv) +{ + struct toto t[] = { + { .x = 1, .y = 2 }, + { .x = 1, .y = 2 }, + { .x = 1, .y = 2 }, + { .x = 1, .y = 2 }, + }; + + struct toto u[4]; + + printf("%zu %zu\n", sizeof(t), sizeof(u)); + + return 0; +} diff --git a/tools/setup.sh b/tools/setup.sh new file mode 100755 index 0000000000..3726528f60 --- /dev/null +++ b/tools/setup.sh @@ -0,0 +1,420 @@ +#! /bin/bash + +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +# +# Run with "source /path/to/setup.sh" +# + +# +# Change to DPDK directory ( <this-script's-dir>/.. ), and export it as RTE_SDK +# +cd $(dirname ${BASH_SOURCE[0]})/.. +export RTE_SDK=$PWD +echo "------------------------------------------------------------------------------" +echo " RTE_SDK exported as $RTE_SDK" +echo "------------------------------------------------------------------------------" + +# +# Application EAL parameters for setting memory options (amount/channels/ranks). +# +EAL_PARAMS='-n 4' + +# +# Sets QUIT variable so script will finish. +# +quit() +{ + QUIT=$1 +} + +# +# Sets up environment variables for ICC. +# +setup_icc() +{ + DEFAULT_PATH=/opt/intel/bin/iccvars.sh + param=$1 + shpath=`which iccvars.sh 2> /dev/null` + if [ $?
-eq 0 ] ; then + echo "Loading iccvars.sh from $shpath for $param" + source $shpath $param + elif [ -f $DEFAULT_PATH ] ; then + echo "Loading iccvars.sh from $DEFAULT_PATH for $param" + source $DEFAULT_PATH $param + else + echo "## ERROR: cannot find 'iccvars.sh' script to set up ICC." + echo "## To fix, please add the directory that contains" + echo "## iccvars.sh to your 'PATH' environment variable." + quit + fi +} + +# +# Sets RTE_TARGET and does a "make install". +# +setup_target() +{ + option=$1 + export RTE_TARGET=${TARGETS[option]} + + compiler=${RTE_TARGET##*-} + if [ "$compiler" == "icc" ] ; then + platform=${RTE_TARGET%%-*} + if [ "$platform" == "x86_64" ] ; then + setup_icc intel64 + else + setup_icc ia32 + fi + fi + if [ "$QUIT" == "0" ] ; then + make install T=${RTE_TARGET} + fi + echo "------------------------------------------------------------------------------" + echo " RTE_TARGET exported as $RTE_TARGET" + echo "------------------------------------------------------------------------------" +} + +# +# Uninstall all targets. +# +uninstall_targets() +{ + make uninstall +} + +# +# Creates hugepage filesystem. +# +create_mnt_huge() +{ + echo "Creating /mnt/huge and mounting as hugetlbfs" + sudo mkdir -p /mnt/huge + + grep -s '/mnt/huge' /proc/mounts > /dev/null + if [ $? -ne 0 ] ; then + sudo mount -t hugetlbfs nodev /mnt/huge + fi +} + +# +# Removes hugepage filesystem. +# +remove_mnt_huge() +{ + echo "Unmounting /mnt/huge and removing directory" + grep -s '/mnt/huge' /proc/mounts > /dev/null + if [ $? -eq 0 ] ; then + sudo umount /mnt/huge + fi + + if [ -d /mnt/huge ] ; then + sudo rm -R /mnt/huge + fi +} + +# +# Unloads igb_uio.ko. +# +remove_igb_uio_module() +{ + echo "Unloading any existing DPDK UIO module" + /sbin/lsmod | grep -s igb_uio > /dev/null + if [ $? -eq 0 ] ; then + sudo /sbin/rmmod igb_uio + fi +} + +# +# Loads new igb_uio.ko (and uio module if needed). +# +load_igb_uio_module() +{ + if [ ! -f $RTE_SDK/$RTE_TARGET/kmod/igb_uio.ko ];then + echo "## ERROR: Target does not have the DPDK UIO Kernel Module." + echo " To fix, please try to rebuild target." + return + fi + + remove_igb_uio_module + + /sbin/lsmod | grep -s uio > /dev/null + if [ $? -ne 0 ] ; then + if [ -f /lib/modules/$(uname -r)/kernel/drivers/uio/uio.ko ] ; then + echo "Loading uio module" + sudo /sbin/modprobe uio + fi + fi + + # UIO may be compiled into kernel, so it may not be an error if it can't + # be loaded. + + echo "Loading DPDK UIO module" + sudo /sbin/insmod $RTE_SDK/$RTE_TARGET/kmod/igb_uio.ko + if [ $? -ne 0 ] ; then + echo "## ERROR: Could not load kmod/igb_uio.ko." + quit + fi +} + +# +# Removes all reserved hugepages. +# +clear_huge_pages() +{ + echo > .echo_tmp + for d in /sys/devices/system/node/node? ; do + echo "echo 0 > $d/hugepages/hugepages-2048kB/nr_hugepages" >> .echo_tmp + done + echo "Removing currently reserved hugepages" + sudo sh .echo_tmp + rm -f .echo_tmp + + remove_mnt_huge +} + +# +# Creates hugepages. +# +set_non_numa_pages() +{ + clear_huge_pages + + echo "" + echo " Input the number of 2MB pages" + echo " Example: to have 128MB of hugepages available, enter '64' to" + echo " reserve 64 * 2MB pages" + echo -n "Number of pages: " + read Pages + + echo "echo $Pages > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" > .echo_tmp + + echo "Reserving hugepages" + sudo sh .echo_tmp + rm -f .echo_tmp + + create_mnt_huge +} + +# +# Creates hugepages on specific NUMA nodes. 
+# +set_numa_pages() +{ + clear_huge_pages + + echo "" + echo " Input the number of 2MB pages for each node" + echo " Example: to have 128MB of hugepages available per node," + echo " enter '64' to reserve 64 * 2MB pages on each node" + + echo > .echo_tmp + for d in /sys/devices/system/node/node? ; do + node=$(basename $d) + echo -n "Number of pages for $node: " + read Pages + echo "echo $Pages > $d/hugepages/hugepages-2048kB/nr_hugepages" >> .echo_tmp + done + echo "Reserving hugepages" + sudo sh .echo_tmp + rm -f .echo_tmp + + create_mnt_huge +} + +# +# Run unit test application. +# +run_test_app() +{ + echo "" + echo " Enter hex bitmask of cores to execute test app on" + echo " Example: to execute app on cores 0 to 7, enter 0xff" + echo -n "bitmask: " + read Bitmask + echo "Launching app" + sudo ${RTE_TARGET}/app/test -c $Bitmask $EAL_PARAMS +} + +# +# Run unit testpmd application. +# +run_testpmd_app() +{ + echo "" + echo " Enter hex bitmask of cores to execute testpmd app on" + echo " Example: to execute app on cores 0 to 7, enter 0xff" + echo -n "bitmask: " + read Bitmask + echo "Launching app" + sudo ${RTE_TARGET}/app/testpmd -c $Bitmask $EAL_PARAMS -- -i +} + +# +# Print hugepage information. +# +grep_meminfo() +{ + grep -i huge /proc/meminfo +} + +# +# List all hugepage file references +# +ls_mnt_huge() +{ + ls -lh /mnt/huge +} + +# +# Options for building a target. Note that this step MUST be first as it sets +# up TARGETS[] starting from 1, and this is accessed in setup_target using the +# user entered option. +# +step1_func() +{ + TITLE="Select the DPDK environment to build" + CONFIG_NUM=1 + for cfg in config/defconfig_* ; do + cfg=${cfg/config\/defconfig_/} + TEXT[$CONFIG_NUM]="$cfg" + TARGETS[$CONFIG_NUM]=$cfg + FUNC[$CONFIG_NUM]="setup_target" + let "CONFIG_NUM+=1" + done +} + +# +# Options for setting up environment. +# +step2_func() +{ + TITLE="Setup linuxapp environment" + + TEXT[1]="Insert IGB UIO module" + FUNC[1]="load_igb_uio_module" + + TEXT[2]="Setup hugepage mappings for non-NUMA systems" + FUNC[2]="set_non_numa_pages" + + TEXT[3]="Setup hugepage mappings for NUMA systems" + FUNC[3]="set_numa_pages" +} + +# +# Options for running applications. 
+# +step3_func() +{ + TITLE="Run test application for linuxapp environment" + + TEXT[1]="Run test application (\$RTE_TARGET/app/test)" + FUNC[1]="run_test_app" + + TEXT[2]="Run testpmd application in interactive mode (\$RTE_TARGET/app/testpmd)" + FUNC[2]="run_testpmd_app" +} + +# +# Other options +# +step4_func() +{ + TITLE="Other tools" + + TEXT[1]="List hugepage info from /proc/meminfo" + FUNC[1]="grep_meminfo" + + TEXT[2]="List hugepage files in /mnt/huge" + FUNC[2]="ls_mnt_huge" +} + +# +# Options for cleaning up the system +# +step5_func() +{ + TITLE="Uninstall and system cleanup" + + TEXT[1]="Uninstall all targets" + FUNC[1]="uninstall_targets" + + TEXT[2]="Remove IGB UIO module" + FUNC[2]="remove_igb_uio_module" + + TEXT[3]="Remove hugepage mappings" + FUNC[3]="clear_huge_pages" +} + +STEPS[1]="step1_func" +STEPS[2]="step2_func" +STEPS[3]="step3_func" +STEPS[4]="step4_func" +STEPS[5]="step5_func" + +QUIT=0 + +while [ "$QUIT" == "0" ]; do + OPTION_NUM=1 + + for s in $(seq ${#STEPS[@]}) ; do + ${STEPS[s]} + + echo "----------------------------------------------------------" + echo " Step $s: ${TITLE}" + echo "----------------------------------------------------------" + + for i in $(seq ${#TEXT[@]}) ; do + echo "[$OPTION_NUM] ${TEXT[i]}" + OPTIONS[$OPTION_NUM]=${FUNC[i]} + let "OPTION_NUM+=1" + done + + # Clear TEXT and FUNC arrays before next step + unset TEXT + unset FUNC + + echo "" + done + + echo "[$OPTION_NUM] Exit Script" + OPTIONS[$OPTION_NUM]="quit" + echo "" + echo -n "Option: " + read our_entry + echo "" + ${OPTIONS[our_entry]} ${our_entry} + echo + echo -n "Press enter to continue ..."; read +done -- 2.20.1
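The interactive menu above is only a convenience wrapper; every option ends up running a handful of shell commands defined in the functions earlier in the script. A minimal sketch of the same flow done by hand is shown below, assuming the x86_64-default-linuxapp-gcc target and 64 hugepages purely as illustrative choices (the script prompts for these values interactively and runs the privileged steps through sudo):

    export RTE_SDK=$PWD                                # root of the DPDK tree, as exported above
    export RTE_TARGET=x86_64-default-linuxapp-gcc      # step 1: pick a target and build it
    make install T=$RTE_TARGET
    echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages   # step 2: reserve 2MB pages (non-NUMA case)
    mkdir -p /mnt/huge
    mount -t hugetlbfs nodev /mnt/huge
    modprobe uio                                       # step 2: load uio, then the DPDK igb_uio module
    insmod $RTE_TARGET/kmod/igb_uio.ko
    $RTE_TARGET/app/test -c 0xff -n 4                  # step 3: run the test app on cores 0 to 7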

diff --git a/examples/multi_process/Makefile b/examples/multi_process/Makefile
new file mode 100644
index 0000000000..0abeafa2c8
--- /dev/null
+++ b/examples/multi_process/Makefile
@@ -0,0 +1,49 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +include $(RTE_SDK)/mk/rte.vars.mk +unexport RTE_SRCDIR RTE_OUTPUT RTE_EXTMK + +DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += $(wildcard *_mp) + +.PHONY: all clean $(DIRS-y) + +all: $(DIRS-y) +clean: $(DIRS-y) + +$(DIRS-y): + $(MAKE) -C $@ $(MAKECMDGOALS) diff --git a/examples/multi_process/client_server_mp/Makefile b/examples/multi_process/client_server_mp/Makefile new file mode 100644 index 0000000000..abb11ba09a --- /dev/null +++ b/examples/multi_process/client_server_mp/Makefile @@ -0,0 +1,49 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +include $(RTE_SDK)/mk/rte.vars.mk +unexport RTE_SRCDIR RTE_OUTPUT RTE_EXTMK + +DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += $(wildcard mp_*) + +.PHONY: all clean $(DIRS-y) + +all: $(DIRS-y) +clean: $(DIRS-y) + +$(DIRS-y): + $(MAKE) -C $@ $(MAKECMDGOALS) diff --git a/examples/multi_process/client_server_mp/mp_client/Makefile b/examples/multi_process/client_server_mp/mp_client/Makefile new file mode 100644 index 0000000000..202fce349e --- /dev/null +++ b/examples/multi_process/client_server_mp/mp_client/Makefile @@ -0,0 +1,50 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = mp_client + +# all source are stored in SRCS-y +SRCS-y := client.c + +CFLAGS += $(WERROR_FLAGS) -O3 +CFLAGS += -I$(SRCDIR)/../shared + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c new file mode 100644 index 0000000000..bfb7476701 --- /dev/null +++ b/examples/multi_process/client_server_mp/mp_client/client.c @@ -0,0 +1,294 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
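The client source that follows relies on the standard C library plus the DPDK EAL, ring, mempool, mbuf, memzone and ethdev APIs. As a hedged reference only (the exact header list in the original file may differ), an include block for such a client would look like:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>

#include <rte_eal.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_memzone.h>
#include <rte_ethdev.h>
#include <rte_log.h>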
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "init_drivers.h" + +/* Number of packets to attempt to read from queue */ +#define PKT_READ_SIZE ((uint16_t)32) + +/* our client id number - tells us which rx queue to read, and NIC TX + * queue to write to. */ +static uint8_t client_id = 0; + +struct mbuf_queue { +#define MBQ_CAPACITY 32 + struct rte_mbuf *bufs[MBQ_CAPACITY]; + uint16_t top; +}; + +/* maps input ports to output ports for packets */ +static uint8_t output_ports[RTE_MAX_ETHPORTS]; + +/* buffers up a set of packet that are ready to send */ +static struct mbuf_queue output_bufs[RTE_MAX_ETHPORTS]; + +/* shared data from server. We update statistics here */ +static volatile struct tx_stats *tx_stats; + + +/* + * print a usage message + */ +static void +usage(const char *progname) +{ + printf("Usage: %s [EAL args] -- -n \n\n", progname); +} + +/* + * Convert the client id number from a string to an int. + */ +static int +parse_client_num(const char *client) +{ + char *end = NULL; + unsigned long temp; + + if (client == NULL || *client == '\0') + return -1; + + temp = strtoul(client, &end, 10); + if (end == NULL || *end != '\0') + return -1; + + client_id = (uint8_t)temp; + return 0; +} + +/* + * Parse the application arguments to the client app. + */ +static int +parse_app_args(int argc, char *argv[]) +{ + int option_index, opt; + char **argvopt = argv; + const char *progname = NULL; + static struct option lgopts[] = { /* no long options */ + {NULL, 0, 0, 0 } + }; + progname = argv[0]; + + while ((opt = getopt_long(argc, argvopt, "n:", lgopts, + &option_index)) != EOF){ + switch (opt){ + case 'n': + if (parse_client_num(optarg) != 0){ + usage(progname); + return -1; + } + break; + default: + usage(progname); + return -1; + } + } + return 0; +} + +/* + * set up output ports so that all traffic on port gets sent out + * its paired port. Index using actual port numbers since that is + * what comes in the mbuf structure. + */ +static void configure_output_ports(const struct port_info *ports) +{ + int i; + if (ports->num_ports > RTE_MAX_ETHPORTS) + rte_exit(EXIT_FAILURE, "Too many ethernet ports. RTE_MAX_ETHPORTS = %u\n", + (unsigned)RTE_MAX_ETHPORTS); + for (i = 0; i < ports->num_ports - 1; i+=2){ + uint8_t p1 = ports->id[i]; + uint8_t p2 = ports->id[i+1]; + output_ports[p1] = p2; + output_ports[p2] = p1; + } +} + + +static inline void +send_packets(uint8_t port) +{ + uint16_t i, sent; + struct mbuf_queue *mbq = &output_bufs[port]; + + if (unlikely(mbq->top == 0)) + return; + + sent = rte_eth_tx_burst(port, client_id, mbq->bufs, mbq->top); + if (unlikely(sent < mbq->top)){ + for (i = sent; i < mbq->top; i++) + rte_pktmbuf_free(mbq->bufs[i]); + tx_stats->tx_drop[port] += (mbq->top - sent); + } + tx_stats->tx[port] += sent; + mbq->top = 0; +} + +/* + * Enqueue a packet to be sent on a particular port, but + * don't send it yet. Only when the buffer is full. 
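configure_output_ports() above pairs the discovered ports so that traffic received on one member of a pair is transmitted on the other. A self-contained illustration of that mapping rule, using a hypothetical four-port list and no DPDK calls:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical list of four port ids, as if discovered at startup */
	const uint8_t id[] = {0, 1, 2, 3};
	const int num_ports = 4;
	uint8_t output_ports[4];
	int i;

	/* same pairing rule as configure_output_ports(): even/odd neighbours swap */
	for (i = 0; i < num_ports - 1; i += 2) {
		output_ports[id[i]] = id[i + 1];
		output_ports[id[i + 1]] = id[i];
	}
	for (i = 0; i < num_ports; i++)
		printf("packets in on port %d go out on port %d\n",
				i, output_ports[i]);
	return 0;
}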
+ */ +static inline void +enqueue_packet(struct rte_mbuf *buf, uint8_t port) +{ + struct mbuf_queue *mbq = &output_bufs[port]; + mbq->bufs[mbq->top++] = buf; + + if (mbq->top == MBQ_CAPACITY) + send_packets(port); +} + +/* + * This function performs routing of packets + * Just sends each input packet out an output port based solely on the input + * port it arrived on. + */ +static void +handle_packet(struct rte_mbuf *buf) +{ + const uint8_t in_port = buf->pkt.in_port; + const uint8_t out_port = output_ports[in_port]; + + enqueue_packet(buf, out_port); +} + +/* + * Application main function - loops through + * receiving and processing packets. Never returns + */ +int +main(int argc, char *argv[]) +{ + const struct rte_memzone *mz; + struct rte_ring *rx_ring; + struct rte_mempool *mp; + struct port_info *ports; + int need_flush = 0; /* indicates whether we have unsent packets */ + int retval; + void *pkts[PKT_READ_SIZE]; + + if ((retval = rte_eal_init(argc, argv)) < 0) + return -1; + argc -= retval; + argv += retval; + + if (parse_app_args(argc, argv) < 0) + rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n"); + + if (init_drivers() < 0) + rte_exit(EXIT_FAILURE, "Cannot get NIC ports\n"); + if (rte_eth_dev_count() == 0) + rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); + + rx_ring = rte_ring_lookup(get_rx_queue_name(client_id)); + if (rx_ring == NULL) + rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n"); + + mp = rte_mempool_lookup(PKTMBUF_POOL_NAME); + if (mp == NULL) + rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n"); + + mz = rte_memzone_lookup(MZ_PORT_INFO); + if (mz == NULL) + rte_exit(EXIT_FAILURE, "Cannot get port info structure\n"); + ports = mz->addr; + tx_stats = &(ports->tx_stats[client_id]); + + configure_output_ports(ports); + + RTE_LOG(INFO, APP, "Finished Process Init.\n"); + + printf("\nClient process %d handling packets\n", client_id); + printf("[Press Ctrl-C to quit ...]\n"); + + for (;;) { + uint16_t i, rx_pkts = PKT_READ_SIZE; + uint8_t port; + + /* try dequeuing max possible packets first, if that fails, get the + * most we can. Loop body should only execute once, maximum */ + while (rx_pkts > 0 && + unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0)) + rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE); + + if (unlikely(rx_pkts == 0)){ + if (need_flush) + for (port = 0; port < ports->num_ports; port++) + send_packets(ports->id[port]); + need_flush = 0; + continue; + } + + for (i = 0; i < rx_pkts; i++) + handle_packet(pkts[i]); + + need_flush = 1; + } +} diff --git a/examples/multi_process/client_server_mp/mp_server/Makefile b/examples/multi_process/client_server_mp/mp_server/Makefile new file mode 100644 index 0000000000..009c6413e6 --- /dev/null +++ b/examples/multi_process/client_server_mp/mp_server/Makefile @@ -0,0 +1,63 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. 
+# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp") +$(error This application can only operate in a linuxapp environment, \ +please change the definition of the RTE_TARGET environment variable) +endif + +# binary name +APP = mp_server + +# all source are stored in SRCS-y +SRCS-y := main.c init.c args.c + +INC := $(wildcard *.h) + +CFLAGS += $(WERROR_FLAGS) -O3 +CFLAGS += -I$(SRCDIR)/../shared + +# for newer gcc, e.g. 4.4, no-strict-aliasing may not be necessary +# and so the next line can be removed in those cases. +EXTRA_CFLAGS += -fno-strict-aliasing + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/multi_process/client_server_mp/mp_server/args.c b/examples/multi_process/client_server_mp/mp_server/args.c new file mode 100644 index 0000000000..ecdddabcbe --- /dev/null +++ b/examples/multi_process/client_server_mp/mp_server/args.c @@ -0,0 +1,175 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "common.h" +#include "args.h" +#include "init.h" + +/* global var for number of clients - extern in header */ +uint8_t num_clients; + +static const char *progname; + +/** + * Prints out usage information to stdout + */ +static void +usage(void) +{ + printf( + "%s [EAL options] -- -p PORTMASK -n NUM_CLIENTS [-s NUM_SOCKETS]\n" + " -p PORTMASK: hexadecimal bitmask of ports to use\n" + " -n NUM_CLIENTS: number of client processes to use\n" + , progname); +} + +/** + * The ports to be used by the application are passed in + * the form of a bitmask. This function parses the bitmask + * and places the port numbers to be used into the port[] + * array variable + */ +static int +parse_portmask(uint8_t max_ports, const char *portmask) +{ + char *end = NULL; + unsigned long pm; + uint8_t count = 0; + + if (portmask == NULL || *portmask == '\0') + return -1; + + /* convert parameter to a number and verify */ + pm = strtoul(portmask, &end, 16); + if (end == NULL || *end != '\0' || pm == 0) + return -1; + + /* loop through bits of the mask and mark ports */ + while (pm != 0){ + if (pm & 0x01){ /* bit is set in mask, use port */ + if (count >= max_ports) + printf("WARNING: requested port %u not present" + " - ignoring\n", (unsigned)count); + else + ports->id[ports->num_ports++] = count; + } + pm = (pm >> 1); + count++; + } + + return 0; +} + +/** + * Take the number of clients parameter passed to the app + * and convert to a number to store in the num_clients variable + */ +static int +parse_num_clients(const char *clients) +{ + char *end = NULL; + unsigned long temp; + + if (clients == NULL || *clients == '\0') + return -1; + + temp = strtoul(clients, &end, 10); + if (end == NULL || *end != '\0' || temp == 0) + return -1; + + num_clients = (uint8_t)temp; + return 0; +} + +/** + * The application specific arguments follow the DPDK-specific + * arguments which are stripped by the DPDK init. This function + * processes these application arguments, printing usage info + * on error. 
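parse_portmask() above walks the hexadecimal mask one bit at a time, so for example -p 5 (binary 101) selects ports 0 and 2. A standalone version of the same loop, with a hypothetical limit of 8 ports:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* hypothetical command-line value: -p 5 selects ports 0 and 2 */
	unsigned long pm = strtoul("5", NULL, 16);
	uint8_t ids[8];                 /* assume at most 8 ports for this sketch */
	unsigned count = 0, num_ports = 0, i;

	while (pm != 0) {
		if (pm & 0x01)          /* bit set in the mask: use this port */
			ids[num_ports++] = (uint8_t)count;
		pm >>= 1;
		count++;
	}
	for (i = 0; i < num_ports; i++)
		printf("using port %u\n", (unsigned)ids[i]);
	return 0;
}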
+ */ +int +parse_app_args(uint8_t max_ports, int argc, char *argv[]) +{ + int option_index, opt; + char **argvopt = argv; + static struct option lgopts[] = { /* no long options */ + {NULL, 0, 0, 0 } + }; + progname = argv[0]; + + while ((opt = getopt_long(argc, argvopt, "n:p:", lgopts, + &option_index)) != EOF){ + switch (opt){ + case 'p': + if (parse_portmask(max_ports, optarg) != 0){ + usage(); + return -1; + } + break; + case 'n': + if (parse_num_clients(optarg) != 0){ + usage(); + return -1; + } + break; + default: + printf("ERROR: Unknown option '%c'\n", opt); + usage(); + return -1; + } + } + + if (ports->num_ports == 0 || num_clients == 0){ + usage(); + return -1; + } + + if (ports->num_ports % 2 != 0){ + printf("ERROR: application requires an even number of ports to use\n"); + return -1; + } + return 0; +} + diff --git a/examples/multi_process/client_server_mp/mp_server/args.h b/examples/multi_process/client_server_mp/mp_server/args.h new file mode 100644 index 0000000000..d2ff6eed51 --- /dev/null +++ b/examples/multi_process/client_server_mp/mp_server/args.h @@ -0,0 +1,41 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _ARGS_H_ +#define _ARGS_H_ + +int parse_app_args(uint8_t max_ports, int argc, char *argv[]); + +#endif /* ifndef _ARGS_H_ */ diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c new file mode 100644 index 0000000000..cbaccb9759 --- /dev/null +++ b/examples/multi_process/client_server_mp/mp_server/init.c @@ -0,0 +1,304 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "init_drivers.h" +#include "args.h" +#include "init.h" +#include "main.h" + +#define MBUFS_PER_CLIENT 1536 +#define MBUFS_PER_PORT 1536 +#define MBUF_CACHE_SIZE 512 +#define MBUF_OVERHEAD (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define RX_MBUF_DATA_SIZE 2048 +#define MBUF_SIZE (RX_MBUF_DATA_SIZE + MBUF_OVERHEAD) + +#define RTE_MP_RX_DESC_DEFAULT 512 +#define RTE_MP_TX_DESC_DEFAULT 512 +#define CLIENT_QUEUE_RINGSIZE 128 + +#define NO_FLAGS 0 + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +/* Default configuration for rx and tx thresholds etc. */ +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. 
+ */ +#define MP_DEFAULT_PTHRESH 36 +#define MP_DEFAULT_RX_HTHRESH 8 +#define MP_DEFAULT_TX_HTHRESH 0 +#define MP_DEFAULT_WTHRESH 0 + +static const struct rte_eth_rxconf rx_conf_default = { + .rx_thresh = { + .pthresh = MP_DEFAULT_PTHRESH, + .hthresh = MP_DEFAULT_RX_HTHRESH, + .wthresh = MP_DEFAULT_WTHRESH, + }, +}; + +static const struct rte_eth_txconf tx_conf_default = { + .tx_thresh = { + .pthresh = MP_DEFAULT_PTHRESH, + .hthresh = MP_DEFAULT_TX_HTHRESH, + .wthresh = MP_DEFAULT_WTHRESH, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +/* The mbuf pool for packet rx */ +struct rte_mempool *pktmbuf_pool; + +/* array of info/queues for clients */ +struct client *clients = NULL; + +/* the port details */ +struct port_info *ports; + +/** + * Initialise the mbuf pool for packet reception for the NIC, and any other + * buffer pools needed by the app - currently none. + */ +static int +init_mbuf_pools(void) +{ + const unsigned num_mbufs = (num_clients * MBUFS_PER_CLIENT) \ + + (ports->num_ports * MBUFS_PER_PORT); + + /* don't pass single-producer/single-consumer flags to mbuf create as it + * seems faster to use a cache instead */ + printf("Creating mbuf pool '%s' [%u mbufs] ...\n", + PKTMBUF_POOL_NAME, num_mbufs); + pktmbuf_pool = rte_mempool_create(PKTMBUF_POOL_NAME, num_mbufs, + MBUF_SIZE, MBUF_CACHE_SIZE, + sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, + NULL, rte_pktmbuf_init, NULL, SOCKET0, NO_FLAGS ); + + return (pktmbuf_pool == NULL); /* 0 on success */ +} + +/** + * Initialise an individual port: + * - configure number of rx and tx rings + * - set up each rx ring, to pull from the main mbuf pool + * - set up each tx ring + * - start the port and report its status to stdout + */ +static int +init_port(uint8_t port_num) +{ + /* for port configuration all features are off by default */ + const struct rte_eth_conf port_conf = { + .rxmode = { + .mq_mode = ETH_RSS + } + }; + const uint16_t rx_rings = 1, tx_rings = num_clients; + const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT; + const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT; + + struct rte_eth_link link; + uint16_t q; + int retval; + + printf("Port %u init ... ", (unsigned)port_num); + fflush(stdout); + + /* Standard DPDK port initialisation - config port, then set up + * rx and tx rings */ + if ((retval = rte_eth_dev_configure(port_num, rx_rings, tx_rings, + &port_conf)) != 0) + return retval; + + for (q = 0; q < rx_rings; q++) { + retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size, + SOCKET0, &rx_conf_default, pktmbuf_pool); + if (retval < 0) return retval; + } + + for ( q = 0; q < tx_rings; q ++ ) { + retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size, + SOCKET0, &tx_conf_default); + if (retval < 0) return retval; + } + + rte_eth_promiscuous_enable(port_num); + + retval = rte_eth_dev_start(port_num); + if (retval < 0) return retval; + + printf( "done: "); + + /* get link status */ + rte_eth_link_get(port_num, &link); + if (link.link_status) { + printf(" Link Up - speed %u Mbps - %s\n", + (uint32_t) link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex\n")); + } + else{ + printf(" Link Down\n"); + } + return 0; +} + +/** + * Set up the DPDK rings which will be used to pass packets, via + * pointers, between the multi-process server and client processes. + * Each client needs one RX queue. 
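init_mbuf_pools() above sizes the shared packet pool as num_clients * MBUFS_PER_CLIENT + num_ports * MBUFS_PER_PORT, so the pool grows linearly with both. A quick standalone check of that arithmetic for a hypothetical two-client, two-port deployment:

#include <stdio.h>

#define MBUFS_PER_CLIENT 1536
#define MBUFS_PER_PORT   1536

int main(void)
{
	/* hypothetical deployment: two clients forwarding between two ports */
	const unsigned num_clients = 2, num_ports = 2;
	const unsigned num_mbufs = (num_clients * MBUFS_PER_CLIENT) +
			(num_ports * MBUFS_PER_PORT);

	printf("mbuf pool size: %u mbufs\n", num_mbufs);   /* prints 6144 */
	return 0;
}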
+ */ +static int +init_shm_rings(void) +{ + unsigned i; + const unsigned ringsize = CLIENT_QUEUE_RINGSIZE; + + clients = rte_malloc("client details", + sizeof(*clients) * num_clients, 0); + if (clients == NULL) + rte_exit(EXIT_FAILURE, "Cannot allocate memory for client program details\n"); + + for (i = 0; i < num_clients; i++) { + /* Create an RX queue for each client */ + clients[i].rx_q = rte_ring_create(get_rx_queue_name(i), + ringsize, SOCKET0, + RING_F_SP_ENQ | RING_F_SC_DEQ ); /* single prod, single cons */ + if (clients[i].rx_q == NULL) + rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for client %u\n", i); + } + return 0; +} + +/** + * Main init function for the multi-process server app, + * calls subfunctions to do each stage of the initialisation. + */ +int +init(int argc, char *argv[]) +{ + int retval; + const struct rte_memzone *mz; + uint8_t i, total_ports; + + /* init EAL, parsing EAL args */ + retval = rte_eal_init(argc, argv); + if (retval < 0) + return -1; + argc -= retval; + argv += retval; + + /* initialise the nic drivers */ + retval = init_drivers(); + if (retval != 0) + rte_exit(EXIT_FAILURE, "Cannot initialise drivers\n"); + + /* get total number of ports */ + total_ports = rte_eth_dev_count(); + + /* set up array for port data */ + mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports), + rte_socket_id(), NO_FLAGS); + if (mz == NULL) + rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n"); + memset(mz->addr, 0, sizeof(*ports)); + ports = mz->addr; + + /* parse additional, application arguments */ + retval = parse_app_args(total_ports, argc, argv); + if (retval != 0) + return -1; + + /* initialise mbuf pools */ + retval = init_mbuf_pools(); + if (retval != 0) + rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n"); + + /* now initialise the ports we will use */ + for (i = 0; i < ports->num_ports; i++) { + retval = init_port(ports->id[i]); + if (retval != 0) + rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n", + (unsigned)i); + } + + /* initialise the client queues/rings for inter-eu comms */ + init_shm_rings(); + + return 0; +} diff --git a/examples/multi_process/client_server_mp/mp_server/init.h b/examples/multi_process/client_server_mp/mp_server/init.h new file mode 100644 index 0000000000..2d4ab58f4f --- /dev/null +++ b/examples/multi_process/client_server_mp/mp_server/init.h @@ -0,0 +1,74 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _INIT_H_ +#define _INIT_H_ + +/* + * #include + * #include "args.h" + */ + +/* + * Define a client structure with all needed info, including + * stats from the clients. + */ +struct client { + struct rte_ring *rx_q; + unsigned client_id; + /* these stats hold how many packets the client will actually receive, + * and how many packets were dropped because the client's queue was full. + * The port-info stats, in contrast, record how many packets were received + * or transmitted on an actual NIC port. + */ + struct { + volatile uint64_t rx; + volatile uint64_t rx_drop; + } stats; +}; + +extern struct client *clients; + +/* the shared port information: port numbers, rx and tx stats etc. */ +extern struct port_info *ports; + +extern struct rte_mempool *pktmbuf_pool; +extern uint8_t num_clients; +extern unsigned num_sockets; +extern struct port_info *ports; + +int init(int argc, char *argv[]); + +#endif /* ifndef _INIT_H_ */ diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c new file mode 100644 index 0000000000..efbe051c9d --- /dev/null +++ b/examples/multi_process/client_server_mp/mp_server/main.c @@ -0,0 +1,330 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
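Each struct client above carries the server-side end of that client's named RX ring; the client process later attaches to the same ring, and to the shared packet pool, purely by name. A hedged sketch of the client-side lookup (assumes rte_eal_init() has already run in a secondary process; the names come from common.h):

#include <rte_ring.h>
#include <rte_mempool.h>

#include "common.h"   /* get_rx_queue_name(), PKTMBUF_POOL_NAME */

/* Sketch only: how a secondary (client) process finds the objects the
 * primary (server) created, using nothing but their shared names. */
static int
attach_to_server_objects(unsigned client_id, struct rte_ring **rx_ring,
		struct rte_mempool **pool)
{
	*rx_ring = rte_ring_lookup(get_rx_queue_name(client_id));
	*pool = rte_mempool_lookup(PKTMBUF_POOL_NAME);

	return (*rx_ring == NULL || *pool == NULL) ? -1 : 0;
}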
+ * + * version: DPDK.L.1.2.3-3 + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "args.h" +#include "init.h" +#include "main.h" + +/* + * When doing reads from the NIC or the client queues, + * use this batch size + */ +#define PACKET_READ_SIZE 32 + +/* + * Local buffers to put packets in, used to send packets in bursts to the + * clients + */ +struct client_rx_buf { + struct rte_mbuf *buffer[PACKET_READ_SIZE]; + uint16_t count; +}; + +/* One buffer per client rx queue - dynamically allocate array */ +static struct client_rx_buf *cl_rx_buf; + +static const char * +get_printable_mac_addr(uint8_t port) +{ + static const char err_address[] = "00:00:00:00:00:00"; + static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)]; + + if (unlikely(port >= RTE_MAX_ETHPORTS)) + return err_address; + if (unlikely(addresses[port][0]=='\0')){ + struct ether_addr mac; + rte_eth_macaddr_get(port, &mac); + rte_snprintf(addresses[port], sizeof(addresses[port]), + "%02x:%02x:%02x:%02x:%02x:%02x\n", + mac.addr_bytes[0], mac.addr_bytes[1], mac.addr_bytes[2], + mac.addr_bytes[3], mac.addr_bytes[4], mac.addr_bytes[5]); + } + return addresses[port]; +} + +/* + * This function displays the recorded statistics for each port + * and for each client. It uses ANSI terminal codes to clear + * screen when called. It is called from a single non-master + * thread in the server process, when the process is run with more + * than one lcore enabled. 
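get_printable_mac_addr() above formats each port's MAC address once and caches the string in a static per-port table, so repeated statistics refreshes avoid re-reading the NIC. A standalone illustration of the same cache-on-first-use pattern, using plain snprintf and a made-up address:

#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 4

static const char *
cached_mac(uint8_t port, const uint8_t bytes[6])
{
	static char addresses[MAX_PORTS][sizeof("00:00:00:00:00:00")];

	if (port >= MAX_PORTS)
		return "00:00:00:00:00:00";
	if (addresses[port][0] == '\0')     /* not formatted yet: do it once */
		snprintf(addresses[port], sizeof(addresses[port]),
				"%02x:%02x:%02x:%02x:%02x:%02x",
				bytes[0], bytes[1], bytes[2],
				bytes[3], bytes[4], bytes[5]);
	return addresses[port];
}

int main(void)
{
	const uint8_t mac[6] = {0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef}; /* made-up */

	printf("port 0: %s\n", cached_mac(0, mac));
	printf("port 0 again (cached): %s\n", cached_mac(0, mac));
	return 0;
}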
+ */ +static void +do_stats_display(void) +{ + unsigned i, j; + const char clr[] = { 27, '[', '2', 'J', '\0' }; + const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' }; + uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS]; + uint64_t client_tx[MAX_CLIENTS], client_tx_drop[MAX_CLIENTS]; + + /* to get TX stats, we need to do some summing calculations */ + memset(port_tx, 0, sizeof(port_tx)); + memset(port_tx_drop, 0, sizeof(port_tx_drop)); + memset(client_tx, 0, sizeof(client_tx)); + memset(client_tx_drop, 0, sizeof(client_tx_drop)); + + for (i = 0; i < num_clients; i++){ + const volatile struct tx_stats *tx = &ports->tx_stats[i]; + for (j = 0; j < ports->num_ports; j++){ + /* assign to local variables here, save re-reading volatile vars */ + const uint64_t tx_val = tx->tx[j]; + const uint64_t drop_val = tx->tx_drop[j]; + port_tx[j] += tx_val; + port_tx_drop[j] += drop_val; + client_tx[i] += tx_val; + client_tx_drop[i] += drop_val; + } + } + + /* Clear screen and move to top left */ + printf("%s%s", clr, topLeft); + + printf("PORTS\n"); + printf("-----\n"); + for (i = 0; i < ports->num_ports; i++) + printf("Port %u: '%s'\t", (unsigned)ports->id[i], + get_printable_mac_addr(ports->id[i])); + printf("\n\n"); + for (i = 0; i < ports->num_ports; i++){ + printf("Port %u - rx: %9"PRIu64"\t" + "tx: %9"PRIu64"\n", + (unsigned)ports->id[i], ports->rx_stats.rx[i], + port_tx[i]); + } + + printf("\nCLIENTS\n"); + printf("-------\n"); + for (i = 0; i < num_clients; i++){ + const unsigned long long rx = clients[i].stats.rx; + const unsigned long long rx_drop = clients[i].stats.rx_drop; + printf("Client %2u - rx: %9llu, rx_drop: %9llu\n" + " tx: %9"PRIu64", tx_drop: %9"PRIu64"\n", + i, rx, rx_drop, client_tx[i], client_tx_drop[i]); + } + + printf("\n"); +} + +/* + * The function called from each non-master lcore used by the process. + * The test_and_set function is used to randomly pick a single lcore on which + * the code to display the statistics will run. Otherwise, the code just + * repeatedly sleeps. + */ +static int +sleep_lcore(__attribute__((unused)) void *dummy) +{ + /* Used to pick a display thread - static, so zero-initialised */ + static rte_atomic32_t display_stats; + + /* Only one core should display stats */ + if (rte_atomic32_test_and_set(&display_stats)) { + const unsigned sleeptime = 1; + printf("Core %u displaying statistics\n", rte_lcore_id()); + + /* Longer initial pause so above printf is seen */ + sleep(sleeptime * 3); + + /* Loop forever: sleep always returns 0 or <= param */ + while (sleep(sleeptime) <= sleeptime) + do_stats_display(); + } + else { + const unsigned sleeptime = 100; + printf("Putting core %u to sleep\n", rte_lcore_id()); + while (sleep(sleeptime) <= sleeptime) + ; /* loop doing nothing */ + } + return 0; +} + +/* + * Function to set all the client statistic values to zero. + * Called at program startup. 
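sleep_lcore() above uses rte_atomic32_test_and_set() to elect exactly one statistics-display core: the flag starts at zero, the first lcore to flip it wins, and every other lcore sees the call fail and simply sleeps. A minimal sketch of that election, assuming it is launched on every lcore via rte_eal_mp_remote_launch():

#include <stdio.h>

#include <rte_atomic.h>
#include <rte_lcore.h>

static rte_atomic32_t winner;   /* static storage, so it starts at zero */

static int
elect_one_lcore(__attribute__((unused)) void *arg)
{
	/* only the first lcore to reach this call sees it succeed */
	if (rte_atomic32_test_and_set(&winner))
		printf("lcore %u is the display core\n", rte_lcore_id());
	else
		printf("lcore %u stays idle\n", rte_lcore_id());
	return 0;
}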
+ */ +static void +clear_stats(void) +{ + unsigned i; + + for (i = 0; i < num_clients; i++) + clients[i].stats.rx = clients[i].stats.rx_drop = 0; +} + +/* + * send a burst of traffic to a client, assuming there are packets + * available to be sent to this client + */ +static void +flush_rx_queue(uint16_t client) +{ + uint16_t j; + struct client *cl; + + if (cl_rx_buf[client].count == 0) + return; + + cl = &clients[client]; + if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer, + cl_rx_buf[client].count) != 0){ + for (j = 0; j < cl_rx_buf[client].count; j++) + rte_pktmbuf_free(cl_rx_buf[client].buffer[j]); + cl->stats.rx_drop += cl_rx_buf[client].count; + } + else + cl->stats.rx += cl_rx_buf[client].count; + + cl_rx_buf[client].count = 0; +} + +/* + * marks a packet down to be sent to a particular client process + */ +static inline void +enqueue_rx_packet(uint8_t client, struct rte_mbuf *buf) +{ + cl_rx_buf[client].buffer[cl_rx_buf[client].count++] = buf; +} + +/* + * This function takes a group of packets and routes them + * individually to the client process. Very simply round-robins the packets + * without checking any of the packet contents. + */ +static void +process_packets(uint32_t port_num __rte_unused, + struct rte_mbuf *pkts[], uint16_t rx_count) +{ + uint16_t i; + uint8_t client = 0; + + for (i = 0; i < rx_count; i++) { + enqueue_rx_packet(client, pkts[i]); + + if (++client == num_clients) + client = 0; + } + + for (i = 0; i < num_clients; i++) + flush_rx_queue(i); +} + +/* + * Function called by the master lcore of the DPDK process. + */ +static void +do_packet_forwarding(void) +{ + unsigned port_num = 0; /* indexes the port[] array */ + + for (;;) { + struct rte_mbuf *buf[PACKET_READ_SIZE]; + uint16_t rx_count; + + /* read a port */ + rx_count = rte_eth_rx_burst(ports->id[port_num], 0, \ + buf, PACKET_READ_SIZE); + ports->rx_stats.rx[port_num] += rx_count; + + /* Now process the NIC packets read */ + if (likely(rx_count > 0)) + process_packets(port_num, buf, rx_count); + + /* move to next port */ + if (++port_num == ports->num_ports) + port_num = 0; + } +} + +int +MAIN(int argc, char *argv[]) +{ + /* initialise the system */ + if (init(argc, argv) < 0 ) + return -1; + RTE_LOG(INFO, APP, "Finished Process Init.\n"); + + cl_rx_buf = calloc(num_clients, sizeof(cl_rx_buf[0])); + + /* clear statistics */ + clear_stats(); + + /* put all other cores to sleep bar master */ + rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MASTER); + + do_packet_forwarding(); + return 0; +} diff --git a/examples/multi_process/client_server_mp/mp_server/main.h b/examples/multi_process/client_server_mp/mp_server/main.h new file mode 100644 index 0000000000..1794abcdd4 --- /dev/null +++ b/examples/multi_process/client_server_mp/mp_server/main.h @@ -0,0 +1,50 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#define SOCKET0 0 +#define SOCKET1 1 + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/multi_process/client_server_mp/shared/common.h b/examples/multi_process/client_server_mp/shared/common.h new file mode 100644 index 0000000000..46cc4f37b5 --- /dev/null +++ b/examples/multi_process/client_server_mp/shared/common.h @@ -0,0 +1,89 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _COMMON_H_ +#define _COMMON_H_ + +#define MAX_CLIENTS 16 + +/* + * Shared port info, including statistics information for display by server. + * Structure will be put in a memzone. + * - All port id values share one cache line as this data will be read-only + * during operation. 
+ * - All rx statistic values share cache lines, as this data is written only + * by the server process. (rare reads by stats display) + * - The tx statistics have values for all ports per cache line, but the stats + * themselves are written by the clients, so we have a distinct set, on different + * cache lines for each client to use. + */ +struct rx_stats{ + uint64_t rx[RTE_MAX_ETHPORTS]; +} __rte_cache_aligned; + +struct tx_stats{ + uint64_t tx[RTE_MAX_ETHPORTS]; + uint64_t tx_drop[RTE_MAX_ETHPORTS]; +} __rte_cache_aligned; + +struct port_info { + uint8_t num_ports; + uint8_t id[RTE_MAX_ETHPORTS]; + volatile struct rx_stats rx_stats; + volatile struct tx_stats tx_stats[MAX_CLIENTS]; +}; + +/* define common names for structures shared between server and client */ +#define MP_CLIENT_RXQ_NAME "MProc_Client_%u_RX" +#define PKTMBUF_POOL_NAME "MProc_pktmbuf_pool" +#define MZ_PORT_INFO "MProc_port_info" + +/* + * Given the rx queue name template above, get the queue name + */ +static inline const char * +get_rx_queue_name(unsigned id) +{ + /* buffer for return value. Size calculated by %u being replaced + * by maximum 3 digits (plus an extra byte for safety) */ + static char buffer[sizeof(MP_CLIENT_RXQ_NAME) + 2]; + + rte_snprintf(buffer, sizeof(buffer) - 1, MP_CLIENT_RXQ_NAME, id); + return buffer; +} + +#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1 + +#endif diff --git a/examples/multi_process/client_server_mp/shared/init_drivers.h b/examples/multi_process/client_server_mp/shared/init_drivers.h new file mode 100644 index 0000000000..658c841c4b --- /dev/null +++ b/examples/multi_process/client_server_mp/shared/init_drivers.h @@ -0,0 +1,58 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
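get_rx_queue_name() above expands the MProc_Client_%u_RX template, so client 0's ring is "MProc_Client_0_RX", client 1's is "MProc_Client_1_RX", and server and client derive the identical name from the client id alone. A standalone check of that expansion using plain snprintf:

#include <stdio.h>

#define MP_CLIENT_RXQ_NAME "MProc_Client_%u_RX"

int main(void)
{
	/* same sizing rule as common.h: template length plus room for 3 digits */
	char buffer[sizeof(MP_CLIENT_RXQ_NAME) + 2];
	unsigned id;

	for (id = 0; id < 3; id++) {
		snprintf(buffer, sizeof(buffer), MP_CLIENT_RXQ_NAME, id);
		printf("client %u ring: %s\n", id, buffer);
	}
	return 0;
}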
+ * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _INIT_DRIVERS_H_ +#define _INIT_DRIVERS_H_ + +/** + * Initialise all 1G and 10G NICs available + */ +static inline int +init_drivers(void) +{ + if ( +#ifdef RTE_LIBRTE_IGB_PMD + (rte_igb_pmd_init() < 0) || +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + (rte_ixgbe_pmd_init() < 0) || +#endif + (rte_eal_pci_probe() < 0 )) + return -1; + + return 0; +} + +#endif diff --git a/examples/multi_process/simple_mp/Makefile b/examples/multi_process/simple_mp/Makefile new file mode 100644 index 0000000000..fb9d81da86 --- /dev/null +++ b/examples/multi_process/simple_mp/Makefile @@ -0,0 +1,52 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = simple_mp + +# all source are stored in SRCS-y +SRCS-y := main.c mp_commands.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/multi_process/simple_mp/main.c b/examples/multi_process/simple_mp/main.c new file mode 100644 index 0000000000..166fc803a2 --- /dev/null +++ b/examples/multi_process/simple_mp/main.c @@ -0,0 +1,160 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +/* + * This sample application is a simple multi-process application which + * demostrates sharing of queues and memory pools between processes, and + * using those queues/pools for communication between the processes. + * + * Application is designed to run with two processes, a primary and a + * secondary, and each accepts commands on the commandline, the most + * important of which is "send", which just sends a string to the other + * process. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mp_commands.h" + +#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1 + +#define SOCKET0 0 + +static const char *_MSG_POOL = "MSG_POOL"; +static const char *_SEC_2_PRI = "SEC_2_PRI"; +static const char *_PRI_2_SEC = "PRI_2_SEC"; +const unsigned string_size = 64; + +struct rte_ring *send_ring, *recv_ring; +struct rte_mempool *message_pool; +volatile int quit = 0; + +static int +lcore_recv(__attribute__((unused)) void *arg) +{ + unsigned lcore_id = rte_lcore_id(); + + printf("Starting core %u\n", lcore_id); + while (!quit){ + void *msg; + if (rte_ring_dequeue(recv_ring, &msg) < 0){ + usleep(5); + continue; + } + printf("core %u: Received '%s'\n", lcore_id, (char *)msg); + rte_mempool_put(message_pool, msg); + } + + return 0; +} + +int +main(int argc, char **argv) +{ + const unsigned flags = 0; + const unsigned ring_size = 64; + const unsigned pool_size = 1024; + const unsigned pool_cache = 32; + const unsigned priv_data_sz = 0; + + int ret; + unsigned lcore_id; + + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot init EAL\n"); + + if (rte_eal_process_type() == RTE_PROC_PRIMARY){ + send_ring = rte_ring_create(_PRI_2_SEC, ring_size, SOCKET0, flags); + recv_ring = rte_ring_create(_SEC_2_PRI, ring_size, SOCKET0, flags); + message_pool = rte_mempool_create(_MSG_POOL, pool_size, + string_size, pool_cache, priv_data_sz, + NULL, NULL, NULL, NULL, + SOCKET0, flags); + } else { + recv_ring = rte_ring_lookup(_PRI_2_SEC); + send_ring = rte_ring_lookup(_SEC_2_PRI); + message_pool = rte_mempool_lookup(_MSG_POOL); + } + if (send_ring == NULL) + rte_exit(EXIT_FAILURE, "Problem getting sending ring\n"); + if (recv_ring == NULL) + rte_exit(EXIT_FAILURE, "Problem getting receiving ring\n"); + if (message_pool == NULL) + rte_exit(EXIT_FAILURE, 
"Problem getting message pool\n"); + + RTE_LOG(INFO, APP, "Finished Process Init.\n"); + + /* call lcore_recv() on every slave lcore */ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_remote_launch(lcore_recv, NULL, lcore_id); + } + + /* call cmd prompt on master lcore */ + struct cmdline *cl = cmdline_stdin_new(simple_mp_ctx, "\nsimple_mp > "); + if (cl == NULL) + rte_exit(EXIT_FAILURE, "Cannot create cmdline instance\n"); + cmdline_interact(cl); + cmdline_stdin_exit(cl); + + rte_eal_mp_wait_lcore(); + return 0; +} diff --git a/examples/multi_process/simple_mp/mp_commands.c b/examples/multi_process/simple_mp/mp_commands.c new file mode 100644 index 0000000000..6e12ed367e --- /dev/null +++ b/examples/multi_process/simple_mp/mp_commands.c @@ -0,0 +1,169 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include "mp_commands.h" + +/**********************************************************/ + +struct cmd_send_result { + cmdline_fixed_string_t action; + cmdline_fixed_string_t message; +}; + +static void cmd_send_parsed(void *parsed_result, + __attribute__((unused)) struct cmdline *cl, + __attribute__((unused)) void *data) +{ + void *msg; + struct cmd_send_result *res = parsed_result; + + if (rte_mempool_get(message_pool, &msg) < 0) + rte_panic("Failed to get message buffer\n"); + rte_snprintf((char *)msg, string_size, "%s", res->message); + if (rte_ring_enqueue(send_ring, msg) < 0) { + printf("Failed to send message - message discarded\n"); + rte_mempool_put(message_pool, msg); + } +} + +cmdline_parse_token_string_t cmd_send_action = + TOKEN_STRING_INITIALIZER(struct cmd_send_result, action, "send"); +cmdline_parse_token_string_t cmd_send_message = + TOKEN_STRING_INITIALIZER(struct cmd_send_result, message, NULL); + +cmdline_parse_inst_t cmd_send = { + .f = cmd_send_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "send a string to another process", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_send_action, + (void *)&cmd_send_message, + NULL, + }, +}; + +/**********************************************************/ + +struct cmd_quit_result { + cmdline_fixed_string_t quit; +}; + +static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result, + struct cmdline *cl, + __attribute__((unused)) void *data) +{ + quit = 1; + cmdline_quit(cl); +} + +cmdline_parse_token_string_t cmd_quit_quit = + TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit"); + +cmdline_parse_inst_t cmd_quit = { + .f = cmd_quit_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "close the application", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_quit_quit, + NULL, + }, +}; + +/**********************************************************/ + +struct cmd_help_result { + cmdline_fixed_string_t help; +}; + +static void cmd_help_parsed(__attribute__((unused)) void *parsed_result, + struct cmdline *cl, + __attribute__((unused)) void *data) +{ + cmdline_printf(cl, "Simple demo example of multi-process in RTE\n\n" + "This is a readline-like interface that can be used to\n" + "send commands to the simple app. 
Commands supported are:\n\n" + "- send [string]\n" "- help\n" "- quit\n\n"); +} + +cmdline_parse_token_string_t cmd_help_help = + TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help"); + +cmdline_parse_inst_t cmd_help = { + .f = cmd_help_parsed, /* function to call */ + .data = NULL, /* 2nd arg of func */ + .help_str = "show help", + .tokens = { /* token list, NULL terminated */ + (void *)&cmd_help_help, + NULL, + }, +}; + +/****** CONTEXT (list of instruction) */ +cmdline_parse_ctx_t simple_mp_ctx[] = { + (cmdline_parse_inst_t *)&cmd_send, + (cmdline_parse_inst_t *)&cmd_quit, + (cmdline_parse_inst_t *)&cmd_help, + NULL, +}; diff --git a/examples/multi_process/simple_mp/mp_commands.h b/examples/multi_process/simple_mp/mp_commands.h new file mode 100644 index 0000000000..bdb25c2d37 --- /dev/null +++ b/examples/multi_process/simple_mp/mp_commands.h @@ -0,0 +1,46 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _SIMPLE_MP_COMMANDS_H_ +#define _SIMPLE_MP_COMMANDS_H_ + +extern const unsigned string_size; +extern struct rte_ring *send_ring; +extern struct rte_mempool *message_pool; +extern volatile int quit; + +extern cmdline_parse_ctx_t simple_mp_ctx[]; + +#endif /* _SIMPLE_MP_COMMANDS_H_ */ diff --git a/examples/multi_process/symmetric_mp/Makefile b/examples/multi_process/symmetric_mp/Makefile new file mode 100644 index 0000000000..5036bad325 --- /dev/null +++ b/examples/multi_process/symmetric_mp/Makefile @@ -0,0 +1,52 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2012 Intel Corporation. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# version: DPDK.L.1.2.3-3 + +ifeq ($(RTE_SDK),) +$(error "Please define RTE_SDK environment variable") +endif + +# Default target, can be overriden by command line or environment +RTE_TARGET ?= x86_64-default-linuxapp-gcc + +include $(RTE_SDK)/mk/rte.vars.mk + +# binary name +APP = symmetric_mp + +# all source are stored in SRCS-y +SRCS-y := main.c + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +include $(RTE_SDK)/mk/rte.extapp.mk diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c new file mode 100644 index 0000000000..ad783f1383 --- /dev/null +++ b/examples/multi_process/symmetric_mp/main.c @@ -0,0 +1,471 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * version: DPDK.L.1.2.3-3 + */ + +/* + * Sample application demostrating how to do packet I/O in a multi-process + * environment. The same code can be run as a primary process and as a + * secondary process, just with a different proc-id parameter in each case + * (apart from the EAL flag to indicate a secondary process). + * + * Each process will read from the same ports, given by the port-mask + * parameter, which should be the same in each case, just using a different + * queue per port as determined by the proc-id parameter. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1 + +#define SOCKET0 0 + +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define NB_MBUFS 64*1024 /* use 64k mbufs */ +#define MBUF_CACHE_SIZE 256 +#define PKT_BURST 32 +#define RX_RING_SIZE 128 +#define TX_RING_SIZE 512 + +#define PARAM_PROC_ID "proc-id" +#define PARAM_NUM_PROCS "num-procs" + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +/* Default configuration for rx and tx thresholds etc. */ +static const struct rte_eth_rxconf rx_conf_default = { + .rx_thresh = { + .pthresh = 8, + .hthresh = 8, + .wthresh = 4, + }, +}; + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +static const struct rte_eth_txconf tx_conf_default = { + .tx_thresh = { + .pthresh = 36, + .hthresh = 0, + .wthresh = 0, + }, + .tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +/* for each lcore, record the elements of the ports array to use */ +struct lcore_ports{ + unsigned start_port; + unsigned num_ports; +}; + +/* structure to record the rx and tx packets. 
Put two per cache line as ports + * used in pairs */ +struct port_stats{ + unsigned rx; + unsigned tx; + unsigned drop; +} __attribute__((aligned(CACHE_LINE_SIZE / 2))); + +static int proc_id = -1; +static unsigned num_procs = 0; + +static uint8_t ports[RTE_MAX_ETHPORTS]; +static unsigned num_ports = 0; + +static struct lcore_ports lcore_ports[RTE_MAX_LCORE]; +static struct port_stats pstats[RTE_MAX_ETHPORTS]; + +/* prints the usage statement and quits with an error message */ +static void +smp_usage(const char *prgname, const char *errmsg) +{ + printf("\nError: %s\n",errmsg); + printf("\n%s [EAL options] -- -p " + "--"PARAM_NUM_PROCS" " + " --"PARAM_PROC_ID" \n" + "-p : a hex bitmask indicating what ports are to be used\n" + "--num-procs: the number of processes which will be used\n" + "--proc-id : the id of the current process (id < num-procs)\n" + "\n", + prgname); + exit(1); +} + + +/* signal handler configured for SIGTERM and SIGINT to print stats on exit */ +static void +print_stats(int signum) +{ + unsigned i; + printf("\nExiting on signal %d\n\n", signum); + for (i = 0; i < num_ports; i++){ + const uint8_t p_num = ports[i]; + printf("Port %u: RX - %u, TX - %u, Drop - %u\n", (unsigned)p_num, + pstats[p_num].rx, pstats[p_num].tx, pstats[p_num].drop); + } + exit(0); +} + +/* Parse the argument given in the command line of the application */ +static int +smp_parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + unsigned i, port_mask = 0; + char *prgname = argv[0]; + static struct option lgopts[] = { + {PARAM_NUM_PROCS, 1, 0, 0}, + {PARAM_PROC_ID, 1, 0, 0}, + {NULL, 0, 0, 0} + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:", \ + lgopts, &option_index)) != EOF) { + + switch (opt) { + case 'p': + port_mask = strtoull(optarg, NULL, 16); + break; + /* long options */ + case 0: + if (strncmp(lgopts[option_index].name, PARAM_NUM_PROCS, 8) == 0) + num_procs = atoi(optarg); + else if (strncmp(lgopts[option_index].name, PARAM_PROC_ID, 7) == 0) + proc_id = atoi(optarg); + break; + + default: + smp_usage(prgname, "Cannot parse all command-line arguments\n"); + } + } + + if (optind >= 0) + argv[optind-1] = prgname; + + if (proc_id < 0) + smp_usage(prgname, "Invalid or missing proc-id parameter\n"); + if (rte_eal_process_type() == RTE_PROC_PRIMARY && num_procs == 0) + smp_usage(prgname, "Invalid or missing num-procs parameter\n"); + if (port_mask == 0) + smp_usage(prgname, "Invalid or missing port mask\n"); + + /* get the port numbers from the port mask */ + for(i = 0; i < rte_eth_dev_count(); i++) + if(port_mask & (1 << i)) + ports[num_ports++] = (uint8_t)i; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + + return (ret); +} + +/* Queries the link status of a port and prints it to screen */ +static void +report_link_status(uint8_t port) +{ + /* get link status */ + struct rte_eth_link link; + rte_eth_link_get(port, &link); + if (link.link_status) + printf("Port %u: Link Up - %u Gbps - %s\n", (unsigned)port, + (unsigned) link.link_speed / 1000, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
+ ("full-duplex") : ("half-duplex\n")); + else + printf("Port %u: Link Down\n", (unsigned)port); +} + +/* + * Initialises a given port using global settings and with the rx buffers + * coming from the mbuf_pool passed as parameter + */ +static inline int +smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues) +{ + struct rte_eth_conf port_conf = { + .rxmode = { + .mq_mode = ETH_RSS, + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 1, /**< IP checksum offload enabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 0, /**< CRC stripped by hardware */ + }, + .rx_adv_conf = { + .rss_conf = { + .rss_key = NULL, + .rss_hf = ETH_RSS_IPV4, + }, + }, + .txmode = { + } + }; + const uint16_t rx_rings = num_queues, tx_rings = num_queues; + int retval; + uint16_t q; + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return 0; + + if (port >= rte_eth_dev_count()) + return -1; + + printf("# Initialising port %u... ", (unsigned)port); + fflush(stdout); + + retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); + if (retval < 0) + return retval; + + for (q = 0; q < rx_rings; q ++) { + retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE, + SOCKET0, &rx_conf_default, + mbuf_pool); + if (retval < 0) + return retval; + } + + for (q = 0; q < tx_rings; q ++) { + retval = rte_eth_tx_queue_setup(port, q, RX_RING_SIZE, + SOCKET0, &tx_conf_default); + if (retval < 0) + return retval; + } + + rte_eth_promiscuous_enable(port); + + retval = rte_eth_dev_start(port); + if (retval < 0) + return retval; + + return 0; +} + +/* Goes through each of the lcores and calculates what ports should + * be used by that core. Fills in the global lcore_ports[] array. + */ +static void +assign_ports_to_cores(void) +{ + + const unsigned lcores = rte_eal_get_configuration()->lcore_count; + const unsigned port_pairs = num_ports / 2; + const unsigned pairs_per_lcore = port_pairs / lcores; + unsigned extra_pairs = port_pairs % lcores; + unsigned ports_assigned = 0; + unsigned i; + + RTE_LCORE_FOREACH(i) { + lcore_ports[i].start_port = ports_assigned; + lcore_ports[i].num_ports = pairs_per_lcore * 2; + if (extra_pairs > 0) { + lcore_ports[i].num_ports += 2; + extra_pairs--; + } + ports_assigned += lcore_ports[i].num_ports; + } +} + +/* Main function used by the processing threads. + * Prints out some configuration details for the thread and then begins + * performing packet RX and TX. + */ +static int +lcore_main(void *arg __rte_unused) +{ + const unsigned id = rte_lcore_id(); + const unsigned start_port = lcore_ports[id].start_port; + const unsigned end_port = start_port + lcore_ports[id].num_ports; + const uint16_t q_id = (uint16_t)proc_id; + unsigned p, i; + char msgbuf[256]; + int msgbufpos = 0; + + if (start_port == end_port){ + printf("Lcore %u has nothing to do\n", id); + return 0; + } + + /* build up message in msgbuf before printing to decrease likelihood + * of multi-core message interleaving. 
+ */ + msgbufpos += rte_snprintf(msgbuf, sizeof(msgbuf) - msgbufpos, + "Lcore %u using ports ", id); + for (p = start_port; p < end_port; p++){ + msgbufpos += rte_snprintf(msgbuf + msgbufpos, sizeof(msgbuf) - msgbufpos, + "%u ", (unsigned)ports[p]); + } + printf("%s\n", msgbuf); + printf("lcore %u using queue %u of each port\n", id, (unsigned)q_id); + + /* handle packet I/O from the ports, reading and writing to the + * queue number corresponding to our process number (not lcore id) + */ + + for (;;) { + struct rte_mbuf *buf[PKT_BURST]; + + for (p = start_port; p < end_port; p++) { + const uint8_t src = ports[p]; + const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */ + const uint16_t rx_c = rte_eth_rx_burst(src, q_id, buf, PKT_BURST); + if (rx_c == 0) + continue; + pstats[src].rx += rx_c; + + const uint16_t tx_c = rte_eth_tx_burst(dst, q_id, buf, rx_c); + pstats[dst].tx += tx_c; + if (tx_c != rx_c) { + pstats[dst].drop += (rx_c - tx_c); + for (i = tx_c; i < rx_c; i++) + rte_pktmbuf_free(buf[i]); + } + } + } +} + +/* Main function. + * Performs initialisation and then calls the lcore_main on each core + * to do the packet-processing work. + */ +int +main(int argc, char **argv) +{ + static const char *_SMP_MBUF_POOL = "SMP_MBUF_POOL"; + int ret; + unsigned i; + enum rte_proc_type_t proc_type; + struct rte_mempool *mp; + + /* set up signal handlers to print stats on exit */ + signal(SIGINT, print_stats); + signal(SIGTERM, print_stats); + + /* initialise the EAL for all */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot init EAL\n"); + argc -= ret; + argv += ret; + + /* probe to determine the NIC devices available */ + proc_type = rte_eal_process_type(); +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n"); +#endif + if (rte_eal_pci_probe() < 0) + rte_exit(EXIT_FAILURE, "Cannot probe PCI\n"); + if (rte_eth_dev_count() == 0) + rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); + + /* parse application arguments (those after the EAL ones) */ + smp_parse_args(argc, argv); + + mp = (proc_type == RTE_PROC_SECONDARY) ? 
+ rte_mempool_lookup(_SMP_MBUF_POOL) : + rte_mempool_create(_SMP_MBUF_POOL, NB_MBUFS, MBUF_SIZE, + MBUF_CACHE_SIZE, sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + SOCKET0, 0); + if (mp == NULL) + rte_exit(EXIT_FAILURE, "Cannot get memory pool for buffers\n"); + + if (num_ports & 1) + rte_exit(EXIT_FAILURE, "Application must use an even number of ports\n"); + for(i = 0; i < num_ports; i++){ + if(proc_type == RTE_PROC_PRIMARY) + if (smp_port_init(ports[i], mp, (uint16_t)num_procs) < 0) + rte_exit(EXIT_FAILURE, "Error initialising ports\n"); + report_link_status(ports[i]); + } + + assign_ports_to_cores(); + + RTE_LOG(INFO, APP, "Finished Process Init.\n"); + + rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER); + + return 0; +} diff --git a/examples/timer/482254_Timer_Sample_App_Guide_Rev1.1.pdf b/examples/timer/482254_Timer_Sample_App_Guide_Rev1.1.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a8bf8fdc6011d0212d624296424328a675d093c2 GIT binary patch literal 52612
z(Zu~a4k8a#*`uXFiL2+#b5hdJ?}oYWV;IHhHRyHarIoF>>Nk`wzs7A{Ahq>rNfG%m z0W!pNQn0Y6wibR3vN{g*yG`eP>5+C@F?SO2&#P`dW%13@N}bb!Zh=qp&!J#cA-t*I zR0rKvqfm!hL|a0J&1=hvNn|Pcp1@#JY;n2uXcUpxbE-mLtgQm^IIVZTAuS5H=AmlX zfs98hf)+N1!!=?OrA`gIf3gmELti)@T~BD$_BDIxJ*hS9Ev0m^adpBEZOOHsITS}^UU5h$Zpsq&`P$Tb~*k-&JG#h<1D8}G`$9}xw1U) z*ucPYcg&>Y({a_%-;5Zau0iFgtska`sm?iMdmz&$;C_!FKO*@(gPH@Z*{Uny%R!C` zVDSJ|?6A3Fy@6+RH|9j`J^#urK0-1*HD`#5W8{w?=)NQ~TV?Io*geH0o+{<2$u zMk61AcB7qfHvBvz*^o@fL{$dLv@gta9VB6Km5298oMe(Q99xrAm0vlA7jQ8`f4^c;;)p*uT$ zG(*63Ox~p3YFYZpFK>RfW0mpp?VaaD(UG zfZwOdKX@#2gF|DK!@dyIUEOt8*L=lo~P&+mXo4kpfjJbC}WVAB)7 zv+0R{vFVBbJ2pM>|0A28m7bZN>77ph{sw`KnVyA%nS+`07oDE*9W2kpz{E+%#P*9$ z&&c{aefsaH@L!becTV>I3!na%#NRmge_(L`nNk0Hzwg*_8%OT{GaUX8ux_3|lF#`B zjT~$Z4V(x?%pIM~t*ihJzrg48jD(cx<~GK*u8veZzliDoAf^8<=dUo~zm4htcIfX6 zdR}5PCnq~@28Ms(+PfLpy>smu8UCi(znw47KZy0V4l1^`R(xU(2G#)CUubngruR|P zGrjZ4e}~%ty~#hhtiK&CYin$7;{Ja!pOwU#=_0QMf3iJk@0Ur z{?PKj$Pjh_{6ePuSq3WyHyfMQAI9*vM*rn-S%8y)v4NApUkLc~ApY3)?{favbZ>0* z57pZ_J6O@%I+(t*;2GZO^VR?xC&zd7GySXj-$C&vwhq%t+Izky{LNQwhR|5xQa~o4aWdmzFD*&P3KgUX_>iBLd z;?CyAfWK+c%KUeW{=0=S{Bq%c9o%mx=Vb0=1^7c-p7(Pz`eg^U4u2?p7xJ3}CEzEk z`8Os1QP;mw&Y!FPqpm;elYdk4A9ej3<@~wo->D0lhu_W4 zz{nEdLhw(Y1tICXMB=i>^=?ys4WTJF%`-HyJs zGYoO`mG7Jz>9-XKa)t)NF!ju*D5M&pO5#AhR#loI=zX6Hgm`@=&r+_HLvV{r6xnY`TBx> zUQ@1LOwf1nM!O`jHszLbVxb zWICix1QW~#%7yzLcDnRaIffWZnG+=2MLNnJ&G%6Mn8+7w$I<>zNS~qWG>pWlI74!kp3q;YO zhaghBe})!R^TdASD#DwQcbBfOu5TnNF;!p3L~Dd+xvbd3v*Zv$KwPV* z!N>%R^)AM{y}7O|=-9{`3ETE?9?fN}0V6_ta}kT zEUMT%8-5u^2`<9C8RKq$LStR@RWPE8NqnjXBh;KP_fuTu@q`2n z>99G)<=j2{Pn>(g!Lt>2pCtQexel@SM))6R4;YL@o6@0kjPXvws*HeN^{#z<3PwZl zU4W?{N^oj|5!u-5nQdI~tOw&F2vcLO!UB{HTjoA_-4d(gA}9$ff(BY3%)WWwf{cPz ze*_EJ_u==PbDhC_aHI9X8h?PirlUn2a~+Ep?FS2ljhN(jY^Q8icd_!IXxJ9v`^tk; z@AQL^W-kW_6bLPvH103$`q%B^*RA2-(tp`G{>83UR0QKmi6A=`X`ThJ0ze+4yoJTB zj`I#`-ny;GlV|ITiP8LJa&63=ofw)`t(9y9t|TcJP}(GWP4=8G2-_}l)Aqb86i*cw zXM&VXg)WA?+s^f4@Vv#=hu)@CV?V-+lr>YHY-OYgzex2>!&(zf&@7#ckF$YG1PLC;kBZ>?LNE0pO&qmDTy?D(5JrQm-wQ3DM)<-nK+@3 z7P&=9mZh^k?PU|kM@f;tozkD^Rt#J{`?+}2ERmHqc<;A8ZvswB z4QK*tfZ(8t1jQhB`Ru-07!qd%DyZyO@N}aRn+Pa0B#3%t2rr6CACy3+6c7}n`3tBn zhS{LtXOzctMGCrRyjn+tty+gIH;%I10Ibjz=>U(?=G*pLALq)T9kx!#fNMidQabSr2_*2rZ}BGh?S83Ho<^QQzsz|c^E%Ue zu7`|q;&C*)6wDL*s|#05%*T2dNZ=s)^Ts#?&eFsk|FNqV@r7UJGa^B^=M+Frm3G<?ZF!fHPQ&>O@^`oE=0qD z)J8!oy3BLPtWg~tV@r%BwQEK{t@bE{u%)WP2pnNQB@zW!_9S@D)}I(V{{#=15~&a% zmt6T$UVZEsH>Dq22Vs~uT*y1c`$MTf2|Kq;B{PW*L}=~;IO6-k38Wd?-ittJBd=?& z=Y9^*;Z?X@z)rc)BnJbRk$u946*WP*_b6$i+620(?@y%z(PcKrBSt!iK^WMo_WR?W zGe@3A!fTnw{BEkkAG$y&Aq$)(1H#~R)_*7)e)PU|o>FXp^F$Oay!8*8S-gy85E^O? 
z(TV>!ScNQ@+zzRsFq8`phz>pbnIEx{dLPP>%5Cf|tb)B#Re>rTDX3RWI0fm4qsj&$ zJ{h}&MiE9yd@{V}_qv8wbvJ_z<4GGL+Iyu+B<|a)-TQnU#e%F*JB7}cC8dLFN2(kX zB(88qK&e$_1GT4_Tt(N4VEX+4_KLfdqr*~Thg?&XlxmG@@7}(vkb+`*ihT|z;|3Dy zW9foC8M$OP;0v|9T7$`XvW7qPtYJosb)Lzmt;qVfH@=QnT_Lm8uRzbVdAHhrz(D1V zBshO*^8cfR=-PMLW;8+oUkT}lp!>uas#M#>!PY|{D-lRWMe$fNFz7WA^CEHDk!|1qT zzN;Y;*G~W}e8t=Joz2DB{)0HaHmpgLSbI!IXOdh^0hqhs^EMEux+kIo9@`Dd+S-KS zw7}6UM(e6f(X?~jaIGT$qu_vzpj1J*c0!k&3+q=Wwv$;F7XeS9M1qE?j`h<17_){u z2;2Tu$BCoUCuPz$)g)rSYH9iQR;YfWyj?i`Y}GP~X+BEL8<@hxin$g=>Y-MiX(;ZG z(-Py$+>_zs-*G-Bt{T7fu!u7Y!0P)mgkbE3u8dlynqt*17T(dKhU03jh4E>15rk2D zPap&hm4z{QNSM5uCPVk6xgTDFj>adSco1!1(DK-==OEZJuG!tf5wRX;>CmAFZ(@0;tf_NE!GC7g;=QC8J4@!=n6#{V9S%8nCjhgPk zsXPQ7VM&wFNcMOW&`fK~+P{*p^ppYOz4MK^=_RbY<&JTkO%n;YRZSX65OfUX#3Hau zIxWrwBiY)TPX)UwDz0=>yRr&HPgKw6SueOrw>yq<=(|GBB=ng z=Y*^9jw?1IJ9Lr9#^L7ea3sJ8Y-uzOgc5bY zq}~>?F$ta%^iI@R-RSamR|(Zzpi@r0LNNlBeOUvi@|8cQbNgR#FWr7ts!wW{n z6h!tl#EdZj?h9QGyy|X=_<_HwAyCdSS1ggd@`zopxv_;AiY>Mw>lv1Pw0NWCZKQPl z8oIBy{kRNN6~q)+GiWozMvjoi`+fCCYelEWO7C~3Te7EYA06-F?~LsLeS8+C2>Ubk zia5UFOTy2axc+DbVY$8;mw^&8(@D3^FHC*r`IWF2TcB$-S1c&+c7sqmSM%9X_ABS1Z!L zWNppgA;Wos999f4a18MljdrQvnu=71f=Fc& zv!CLmAPJX{ye%OMya~uV1l)tXfP&xo%e11qEDgX&33%daaegJ7C+O80%DM~}Yp(2I zb@e={O2E=u{*mNHufnlZ&>!fRMO$i~>nTPumjLdOxYc3UuDg8?bnN};F@+?}cM#}r z<~bp7$HRwV+*z*FJ9<&xJJDR_t~)@7ZLFJ;8AZ+;)GybR3%H%zE-J%mzZ3xvSrRRv z=G)Fp@Y+H6_yb+#&7d>{mJ*Z_tiYK{^n>2|sbgIZAH-f@Oo6vo7`f6Lq0NzlTv=nw zqu&h0Ri>dDSpeNn=m89eL|RT`9*=JNw)<#|x5JPbze-z?FIy)gQqk4pd#GS~T0neD z*O*%AyfMocfEChoiJvGiQ^oyk4D}Zu-H3Jd;(5rRS(m0yPf!NNAyg_UZn23eiO-^w z#I0|{v2~t4kRJ~1mkYm9qdUWgvyUdSH)aZ(-~qzKJ^c}@%XwmOxVI{K_;VlO@oFO^ zE0PG4fmb@Ngw)NMO?@u0c{xgEew6$2 z6eHm@Q0)xG;FMu1uXtE0eU?#AXD`KJfncURRguI#kdVn%`y<$Qw(9&z6vK_rl^c}M zYpUGzpuR!sBOzvPJ2fo3q=mMWG?fu@rKFm1HXER{B#hP(yH+cXV2k$wiL)LHtXVP2 z2t(AEbZ;l5CU^hJOd@Zh##H#8x({`~+!}yCbk?WLEDIvmyDFHHVB?X(cR455Ga;i$ z@e!7gX#I}o4C6DtOrph%V$-ZNL(OU|(npeeefmaMWL^?RM7cus85}$!cL!D!+(5c! z2!=Uh#W#01fQMripx|ylqad(b~bLV@_NF<`lS>E+t0GiY@x2a*<+)F_s;z z7hd4a9F6C=Cf>B2t8HE;2#^qX&SO-uP_ zdj)x>>yJNib2L`fp|VAuq>)}Dmyf15A~*{ zFK$4hZg}P_(icX3bps9xKKdvFx(U{Q%cL9jDpUQDi73=QB|FMtpU=p7CkFHGYKQXI zOn-)z3hXaE;SUdC|9ekh=ivI|(;8!yQJYmp#EvVP+e6f;5NRbZ+*wk(M-XfkQ3O$h z5Rzhg+?Jf%w(4D+E+6Q>g~xLm0s`O6To*kx zvB$;ErWRHBb1VS#=wg^;;X2eP`tS7sE)8k}OUL-lgLLX!=cS>5hfzNgC#w#cn=E&z zWr}WbT+3m&$L}cDhIjx-)3OMwc&ZYySIAf=H0(;f&4RWA$N6|W0!K|&KRVp~85OK1 zqGpxaJ_QU$SCWY z3i0wkbM|x#*5Az0&flD%kFNE0sae&urn(|dFEU}TOn3}v!IYRN)IF4wpFQfV>acYu zf>rI*j3bB3W~QM;4v%qfq&<(8rpQV}pItP-%zZ_U%K@cwKD24em^rXnX?9wCHE*By zSib%EShJ)knuhkl5sFTjE9<<6!?oty>!Y}^ty)I+?D$T;Z$889UYq5SN#`kg{gF-W z50)P9g6yQJfVmRrj>wcNYyuWlqCna5uFib?*&t&=WcOilJ0CtO2ziR8brdX@*SXnv ztWdVBFHwynOpphCj|%xyX`p%|)O$zg=0Zix#lwLNT6j7fZS? 
zX6vRH_dVeE3^F@aQmj*Lm|VDO;8K{hmag^dwA{=kpH;a%j_T+RynX!jG5OoSU~_QZ zV#Z|Wt*Y{iTfW#Zorpagb-zEuVZNeU`zvc_Vf^P*=Y5|Fz`^GIqR;TY6~^2Mpd>Ez z{u1H8yB9FCad9xn5wiT#4b0XvlU-F*7STf|J6%2U#u;a@JQC;`{jTW=@xT-@O+H|b z0QJGjT7{Jeh#(;Wi+;}a11`vus0cG8qAbxzkU&&0WQ-^&F&0~p@^W`S+uOSv>fBf{ zJ#)NU_RMLd2O^axpl1M21|dwN*o5%loK+7NpQ?QU`rzgVTm;0Zc~sgZc-kXC%D{o) z(e@4MIdUBqSmL=0aCjtN9QYf8R=uTe|;&SZ)NhFye%TxM+|x0!u7B zjX_GG-@!0Od-qF@H~O;T*s(FbcK>#!^7611(_NUGXUJy~!+!C8aI8;qMPas)8Z45j zV`vxJ_EyZHY4AW1q)jlK6>dlwVX6HH@J>h<4nTH=EexFdFz@-Az_x{33kyxITJ0}D z+UTWYZ7i(Eb3GlH6F5Qq^Y{~zzU|f@(NhhV%ILBKMVh|SV%b|Co0 zD3-jn-E)wgo)C9J-+yNbTeu57seV@a0i-_`VNw4RL3_*xh`R}d(De-=W1k3!RMgwD z`(nV|Y}-8?15(L4Dw>6bCAtMSIj$wQ`=>AWGo1GYFUf^VcS`8#NH_pG0nEvMO@m63 zN{Ek_H)(i#3hs@k|Bds!`bq=)Tln5N7muPyiP=a?2d84YyH%B6MsI1IBG!d{q+&y7eLU%Acp-A zm<8#VV4($&mcVZVq!d7Z2$GlqQwTtC!qNMUZoY?3pZY=QgGB`%=zr({zCr{uAYvBr z-z8F&fMNC{m!y^fF}w@knrMluF=r)Z7yihE9dRtq3DAl)MCNnWBw_w zuD4$^CYr_sEi|3mqM(sTm*_D3#${pfJEcXVlU8GhNm z_i>kiDi`${4Fr`L4W9awQmR@Z^$K+zDh!%D<+^%6(sdF@(gGEp+7IP)RW-$11v%CA zYA;21t>B{VlK5PuA_p~qR8qc0^@u8+GH>x?6uq=-KE*W6A@4O78yTBCTb@>>R`*Ir zTZ~7TN47`#BMDr<2VoMm0+|ABBQa&wS#=imdd;gYblu22wCYCra&=2NZSjvXo*6Eo zw}mm8F)4B}a`h9!I(1cnHt9B5r`Eht_`(c|XMXFl-WnT1{JPyzZy~@|`sQERIQ1E8 zP&?;5sBF#GW(ulcSSMlHZUc$N*&2 zQ}@$c(q1i!YzLXkncJ=9*j+g`%+}3c9nyN3C+Y%6Pqh?iz37@WowU6g8tQhnn>6G! z&9y|eRkh(*N??7(n06xAR#uih3U17Qy0Ry;gJhtvcQ0YDkgu>W%h4}tPH5_OdyUDh z#h9yGzUNqIj&0&M=HJfU|LRoY=(b(5_07e~F4JztQOb^c|GIa%Kke-MKF@*SPTl%m z2hH}6Ev>_qGx8nuZMCD@C7m_iiOyy3!MQkj_2EZ-pj=c`KvcRAe2o41+;V4 zDON^~8jdhzRJ1wNYw{(o7SemcFq<&O!jCGK&%h%M-ba?U2ku(vx8pKRx7t+fu(H6)nc~Qu)2BNsJqGRAAZ6aR3UK=ei znHqH%mKm97&{ud@99W6FV0lIu4N>jM^sfvg56TTL51fQ&M7V}A#OCa`+0nEzpzxqr zAnzl;Ajgu@lLnJDlJCS9B*Jla&IQcHv1T-MnzMM>`UrC;c9C}~ z3!uHo+(0=8YX=u#-D0U?*|G?;$eT!-UerC--PF4p%kQ=vcy+=z#xz!aRHt)wJU#R% zzG=kCg=4~LbB!lw+10wC7RcouL>$a|N zS`~2v-K|<5RV&pNUA213S=S0adO!qpc_}RlDx&+F84}Vi?YgIX&i(_3lbL((?|0`n zzqxa{$$UQ{did?iwj1peuD0*%v6XXFO!MW*b)oI#-TjsOYw~t=EtY4#zWBh#eX&RA z(n@y;d*bHfjGoy^r&DUnnLSHCdi}%N_|Tj;@|Se%X!zp22~9;MMcr$Uti4^G`}HR^ zRL#2qrzckF9vuujc)WYr>6#7xe*Q-PfQ=o6>nHlpx_Z%e>GI{$uao}qV(_nu-G#=< z#~y5G4Srv~Mt?(F?->ozgN>& z>QiM+L-wK(Is^FynKhrGD>NKcaFW+YVQ2CLZuk1o&K()pT}Se9eX9L5A4L>pwpI!IM8ezPR?=NA5;< z)0hv(Y}jJi;&y-Ic7~<*+8?=EIv+IuZu{O@kb~W8pH#z^zikhCVw%$`KZw_r+`}W4%fMa2f%_nkchE<&;XMW_enP{8UkkvDOI7UjQyIf4M@2ohk9pdW)df_^5Q6BCC8Kk%Arws6hZ=UAd0wuCdl!hNeBtv0L}4FtF8k@rIOe9 zX++vS9#19m_pt{x|G?ou0}uf!B!&Y)hx7vyp@Bj^1(re)oTSi)s5BuOVINouuXR5( z@if*R?UcYsJca%Dl;oj-q72=PlNL2WkaA3I1{u0agUL-6lS!^2EoPlw8LZIZ8c%fK z->(?&^H-}dT(42;bBUfXlU^z}$v~q$_Yqgjew~z`<^!HK`p@BcZBE)9Mn-`x6 z4K#o>iYIDYy`1hM#Ia71{>B9986mElf8mPm3OAQ0># z2bV?S3`;Ls;F5Vw7EuCBkx4pCBqWH)1lbrpgUz<)N^*D)6d{#7A$%W$klK70CE>0S z;feo`nBH^u4z&F#rQbmI|MR-R00uKKn1R6z3}#?31A`g(fw(>>M_Azd`3>|wLsZiD ziMS#RucCRRJsaM6Bw64E{+j@bNB||H(FspHVX*_du&#ndj;8{Y_@eSYEDS}`P7c~d zf0b6y$t0jOJi-Jff^;Au41VCFbJ9+*=@~u}da(mf0F=c-G9(a*Y`Rj7ObpWlTVyB^FWMOa- zC_q-8vWwl?NekCsVkoPPLk`NxvnY#XI2+BguvnDBfbLB=OL_~Us~8}(NSy)gDj;EYl4U7mmrQ9Au~JK} zxRGI61v2ss4LfRN<{9A+ZFOcpNxu<^{q7d~`zNavo=dr}waH!|fL7 z2z`~T4AU!-K^z#!ARW!%GuRB`P(lXpsT;%rW-y{~&*6=dvr%$3`ZycCUQv=wlw=d- zV-w}IF>*E(BONYQGLMzaV|~nH`wYBZ4q&&bEV~QEN*6a-a!r<8lYLy1y*{Z@W0H+# zoCwCh1IDPy&LUj$#ieH4#}_baxT6s6?E)9mfWz(SNdsI{xVaE1Ig>ulaGClBBqiBU zk`3i!LwRjTNP|;494(pCk~!^TPWKsjy$C8BkaI*!N#G?%UUKAp9C@#YOKOCB6q4Zb zRA(R&_3=z_J`G48%IjG2(RhZ%+!NBGPAJ zst8R)xKX2voTE2lIxIYLj!p-2CZ^YFRT?!GHcO>9s5D9~7Cc9z&}!92eXv3iuF}De zK4K0wCn78ZML8`r;7Q`#>d~U8F%?RnX%6azC-w;_k;;W>P^XtpYGDH0jVbAvOeMo$ z3T8Q=>mB}nw%WG$X3wiMj6GBL{%pBCuqbWmtA6Q$_OchY*oKvS_RkUiqb`h$I25sN 
zn0&Y)I{o?h>qYA5gVw~Iz3I;ln-Vzv-mQDJGsBI$rj-41>xj(bcb0eGo>Vb&*fTMk zMxQF%l5^Nq zzO^vE=Ooqmusqvdb~rEb?CL4)Njqx;R))uXdMI&22vHF@xB8&j{aPhTN_I{f+QP<; zzM}d3SopB?KjjRGdAal9$x&H(^Dcc-xHaAI`GI#zG@DHUIhoaIBX^%`t=l~exhuvkc}D+b{-pu_W&^X&a4Q(n;_s(}58br>e$%EI JV +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "main.h" + +#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1 + +#define L2FWD_MAX_PORTS 32 + +#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM) +#define NB_MBUF 8192 + +/* + * RX and TX Prefetch, Host, and Write-back threshold values should be + * carefully set for optimal performance. Consult the network + * controller's datasheet and supporting DPDK documentation for guidance + * on how these parameters should be set. + */ +#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */ +#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */ +#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */ + +/* + * These default values are optimized for use with the Intel(R) 82599 10 GbE + * Controller and the DPDK ixgbe PMD. Consider using other values for other + * network controllers and/or network drivers. + */ +#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */ +#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */ +#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */ + +#define MAX_PKT_BURST 32 +#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */ + +#define SOCKET0 0 + +/* + * Configurable number of RX/TX ring descriptors + */ +#define RTE_TEST_RX_DESC_DEFAULT 128 +#define RTE_TEST_TX_DESC_DEFAULT 512 +static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; +static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; + +/* ethernet addresses of ports */ +static struct ether_addr l2fwd_ports_eth_addr[L2FWD_MAX_PORTS]; + +/* mask of enabled ports */ +static uint32_t l2fwd_enabled_port_mask = 0; + +/* list of enabled ports */ +static uint32_t l2fwd_dst_ports[L2FWD_MAX_PORTS]; + +static unsigned int l2fwd_rx_queue_per_lcore = 1; + +#define MAX_PKT_BURST 32 +struct mbuf_table { + unsigned len; + struct rte_mbuf *m_table[MAX_PKT_BURST]; +}; + +#define MAX_RX_QUEUE_PER_LCORE 16 +#define MAX_TX_QUEUE_PER_PORT 16 +struct lcore_queue_conf { + unsigned n_rx_queue; + unsigned rx_queue_list[MAX_RX_QUEUE_PER_LCORE]; + unsigned tx_queue_id; + struct mbuf_table tx_mbufs[L2FWD_MAX_PORTS]; + +} __rte_cache_aligned; +struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; + +static const struct rte_eth_conf port_conf = { + .rxmode = { + .split_hdr_size = 0, + .header_split = 0, /**< Header Split disabled */ + .hw_ip_checksum = 0, /**< IP checksum offload disabled */ + .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ + .hw_strip_crc = 0, /**< CRC stripped by hardware */ + }, + .txmode = { + }, +}; + +static const struct rte_eth_rxconf rx_conf = { + .rx_thresh = { + .pthresh = RX_PTHRESH, + .hthresh = RX_HTHRESH, + .wthresh = RX_WTHRESH, + }, +}; + +static const struct rte_eth_txconf tx_conf = { + .tx_thresh = { + .pthresh = TX_PTHRESH, + .hthresh = TX_HTHRESH, + .wthresh = TX_WTHRESH, + }, + 
.tx_free_thresh = 0, /* Use PMD default values */ + .tx_rs_thresh = 0, /* Use PMD default values */ +}; + +struct rte_mempool * l2fwd_pktmbuf_pool = NULL; + +/* Per-port statistics struct */ +struct l2fwd_port_statistics { + uint64_t tx; + uint64_t rx; + uint64_t dropped; +} __rte_cache_aligned; +struct l2fwd_port_statistics port_statistics[L2FWD_MAX_PORTS]; + +/* A tsc-based timer responsible for triggering statistics printout */ +#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */ +#define MAX_TIMER_PERIOD 86400 /* 1 day max */ +static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */ + +/* Print out statistics on packets dropped */ +static void +print_stats(void) +{ + uint64_t total_packets_dropped, total_packets_tx, total_packets_rx; + unsigned portid; + + total_packets_dropped = 0; + total_packets_tx = 0; + total_packets_rx = 0; + + const char clr[] = { 27, '[', '2', 'J', '\0' }; + const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' }; + + /* Clear screen and move to top left */ + printf("%s%s", clr, topLeft); + + printf("\nPort statistics ===================================="); + + for (portid = 0; portid < L2FWD_MAX_PORTS; portid++) { + /* skip disabled ports */ + if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) + continue; + printf("\nStatistics for port %u ------------------------------" + "\nPackets sent: %24"PRIu64 + "\nPackets received: %20"PRIu64 + "\nPackets dropped: %21"PRIu64, + portid, + port_statistics[portid].tx, + port_statistics[portid].rx, + port_statistics[portid].dropped); + + total_packets_dropped += port_statistics[portid].dropped; + total_packets_tx += port_statistics[portid].tx; + total_packets_rx += port_statistics[portid].rx; + } + printf("\nAggregate statistics ===============================" + "\nTotal packets sent: %18"PRIu64 + "\nTotal packets received: %14"PRIu64 + "\nTotal packets dropped: %15"PRIu64, + total_packets_tx, + total_packets_rx, + total_packets_dropped); + printf("\n====================================================\n"); +} + +/* Send the packet on an output interface */ +static int +l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port) +{ + struct rte_mbuf **m_table; + unsigned ret; + unsigned queueid; + + queueid = (uint16_t) qconf->tx_queue_id; + m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table; + + ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n); + port_statistics[port].tx += ret; + if (unlikely(ret < n)) { + port_statistics[port].dropped += (n - ret); + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < n); + } + + return 0; +} + +/* Send the packet on an output interface */ +static int +l2fwd_send_packet(struct rte_mbuf *m, uint8_t port) +{ + unsigned lcore_id, len; + struct lcore_queue_conf *qconf; + + lcore_id = rte_lcore_id(); + + qconf = &lcore_queue_conf[lcore_id]; + len = qconf->tx_mbufs[port].len; + qconf->tx_mbufs[port].m_table[len] = m; + len++; + + /* enough pkts to be sent */ + if (unlikely(len == MAX_PKT_BURST)) { + l2fwd_send_burst(qconf, MAX_PKT_BURST, port); + len = 0; + } + + qconf->tx_mbufs[port].len = len; + return 0; +} + +static void +l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid) +{ + struct ether_hdr *eth; + void *tmp; + unsigned dst_port; + + dst_port = l2fwd_dst_ports[portid]; + eth = rte_pktmbuf_mtod(m, struct ether_hdr *); + + /* 00:09:c0:00:00:xx */ + tmp = ð->d_addr.addr_bytes[0]; + *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24); + + /* src addr */ + 
ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], ð->s_addr); + + l2fwd_send_packet(m, (uint8_t) dst_port); +} + +/* main processing loop */ +static void +l2fwd_main_loop(void) +{ + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct rte_mbuf *m; + unsigned lcore_id; + uint64_t prev_tsc = 0; + uint64_t diff_tsc, cur_tsc, timer_tsc; + unsigned i, j, portid, nb_rx; + struct lcore_queue_conf *qconf; + + timer_tsc = 0; + + lcore_id = rte_lcore_id(); + qconf = &lcore_queue_conf[lcore_id]; + + if (qconf->n_rx_queue == 0) { + RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id); + while(1); + } + + RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id); + + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id, + portid); + } + + while (1) { + + cur_tsc = rte_rdtsc(); + + /* + * TX burst queue drain + */ + diff_tsc = cur_tsc - prev_tsc; + if (unlikely(diff_tsc > BURST_TX_DRAIN)) { + + /* this could be optimized (use queueid instead of + * portid), but it is not called so often */ + for (portid = 0; portid < L2FWD_MAX_PORTS; portid++) { + if (qconf->tx_mbufs[portid].len == 0) + continue; + l2fwd_send_burst(&lcore_queue_conf[lcore_id], + qconf->tx_mbufs[portid].len, + (uint8_t) portid); + qconf->tx_mbufs[portid].len = 0; + } + + /* if timer is enabled */ + if (timer_period > 0) { + + /* advance the timer */ + timer_tsc += diff_tsc; + + /* if timer has reached its timeout */ + if (unlikely(timer_tsc >= (uint64_t) timer_period)) { + + /* do this only on master core */ + if (lcore_id == rte_get_master_lcore()) { + print_stats(); + /* reset the timer */ + timer_tsc = 0; + } + } + } + + prev_tsc = cur_tsc; + } + + /* + * Read packet from RX queues + */ + for (i = 0; i < qconf->n_rx_queue; i++) { + + portid = qconf->rx_queue_list[i]; + nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, + pkts_burst, MAX_PKT_BURST); + + port_statistics[portid].rx += nb_rx; + + for (j = 0; j < nb_rx; j++) { + m = pkts_burst[j]; + rte_prefetch0(rte_pktmbuf_mtod(m, void *)); + l2fwd_simple_forward(m, portid); + } + } + } +} + +static int +l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy) +{ + l2fwd_main_loop(); + return 0; +} + +/* display usage */ +static void +l2fwd_usage(const char *prgname) +{ + printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n" + " -p PORTMASK: hexadecimal bitmask of ports to configure\n" + " -q NQ: number of queue (=ports) per lcore (default is 1)\n" + " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n", + prgname); +} + +static int +l2fwd_parse_portmask(const char *portmask) +{ + char *end = NULL; + unsigned long pm; + + /* parse hexadecimal string */ + pm = strtoul(portmask, &end, 16); + if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + + if (pm == 0) + return -1; + + return pm; +} + +static unsigned int +l2fwd_parse_nqueue(const char *q_arg) +{ + char *end = NULL; + unsigned long n; + + /* parse hexadecimal string */ + n = strtoul(q_arg, &end, 10); + if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) + return 0; + if (n == 0) + return 0; + if (n >= MAX_RX_QUEUE_PER_LCORE) + return 0; + + return n; +} + +static int +l2fwd_parse_timer_period(const char *q_arg) +{ + char *end = NULL; + int n; + + /* parse number string */ + n = strtol(q_arg, &end, 10); + if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0')) + return -1; + if (n >= MAX_TIMER_PERIOD) + return -1; + + return n; +} + +/* Parse the 
argument given in the command line of the application */ +static int +l2fwd_parse_args(int argc, char **argv) +{ + int opt, ret; + char **argvopt; + int option_index; + char *prgname = argv[0]; + static struct option lgopts[] = { + {NULL, 0, 0, 0} + }; + + argvopt = argv; + + while ((opt = getopt_long(argc, argvopt, "p:q:T:", + lgopts, &option_index)) != EOF) { + + switch (opt) { + /* portmask */ + case 'p': + l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg); + if (l2fwd_enabled_port_mask == 0) { + printf("invalid portmask\n"); + l2fwd_usage(prgname); + return -1; + } + break; + + /* nqueue */ + case 'q': + l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg); + if (l2fwd_rx_queue_per_lcore == 0) { + printf("invalid queue number\n"); + l2fwd_usage(prgname); + return -1; + } + break; + + /* timer period */ + case 'T': + timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND; + if (timer_period < 0) { + printf("invalid timer period\n"); + l2fwd_usage(prgname); + return -1; + } + break; + + /* long options */ + case 0: + l2fwd_usage(prgname); + return -1; + + default: + l2fwd_usage(prgname); + return -1; + } + } + + if (optind >= 0) + argv[optind-1] = prgname; + + ret = optind-1; + optind = 0; /* reset getopt lib */ + return ret; +} + +int +MAIN(int argc, char **argv) +{ + struct lcore_queue_conf *qconf; + struct rte_eth_dev_info dev_info; + struct rte_eth_link link; + int ret; + unsigned int nb_ports, nb_lcores; + unsigned portid, last_port, queueid = 0; + unsigned lcore_id, rx_lcore_id; + unsigned n_tx_queue, max_tx_queues; + unsigned nb_ports_in_mask = 0; + + /* init EAL */ + ret = rte_eal_init(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n"); + argc -= ret; + argv += ret; + + /* parse application arguments (after the EAL ones) */ + ret = l2fwd_parse_args(argc, argv); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n"); + + /* create the mbuf pool */ + l2fwd_pktmbuf_pool = + rte_mempool_create("mbuf_pool", NB_MBUF, + MBUF_SIZE, 32, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + SOCKET0, 0); + if (l2fwd_pktmbuf_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n"); + + /* init driver(s) */ +#ifdef RTE_LIBRTE_IGB_PMD + if (rte_igb_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n"); +#endif +#ifdef RTE_LIBRTE_IXGBE_PMD + if (rte_ixgbe_pmd_init() < 0) + rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n"); +#endif + + if (rte_eal_pci_probe() < 0) + rte_exit(EXIT_FAILURE, "Cannot probe PCI\n"); + + nb_ports = rte_eth_dev_count(); + if (nb_ports == 0) + rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n"); + + if (nb_ports > L2FWD_MAX_PORTS) + nb_ports = L2FWD_MAX_PORTS; + + nb_lcores = rte_lcore_count(); + + /* reset l2fwd_dst_ports */ + for (portid = 0; portid < L2FWD_MAX_PORTS; portid++) + l2fwd_dst_ports[portid] = 0; + last_port = 0; + + /* + * Each logical core is assigned a dedicated TX queue on each port. + * Compute the maximum number of TX queues that can be used. 
+ */ + max_tx_queues = nb_lcores; + for (portid = 0; portid < nb_ports; portid++) { + /* skip ports that are not enabled */ + if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) + continue; + + if (nb_ports_in_mask % 2) { + l2fwd_dst_ports[portid] = last_port; + l2fwd_dst_ports[last_port] = portid; + } + else + last_port = portid; + + nb_ports_in_mask++; + + rte_eth_dev_info_get((uint8_t) portid, &dev_info); + if (max_tx_queues > dev_info.max_tx_queues) + max_tx_queues = dev_info.max_tx_queues; + } + + if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2) { + rte_exit(EXIT_FAILURE, "invalid number of ports in portmask. " + "Should be an even number.\n"); + } + + rx_lcore_id = 0; + n_tx_queue = 0; + qconf = NULL; + + /* Initialize the port/queue configuration of each logical core */ + for (portid = 0; portid < nb_ports; portid++) { + /* skip ports that are not enabled */ + if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) + continue; + + /* get the lcore_id for this port */ + while (rte_lcore_is_enabled(rx_lcore_id) == 0 || + lcore_queue_conf[rx_lcore_id].n_rx_queue == + l2fwd_rx_queue_per_lcore) { + + rx_lcore_id++; + if (rx_lcore_id >= RTE_MAX_LCORE) + rte_exit(EXIT_FAILURE, "Not enough cores\n"); + } + if (qconf != &lcore_queue_conf[rx_lcore_id]) { + if (n_tx_queue == max_tx_queues) + rte_exit(EXIT_FAILURE, + "Not enough TX queues\n"); + /* Assigned a new logical core in the loop above. */ + qconf = &lcore_queue_conf[rx_lcore_id]; + qconf->tx_queue_id = n_tx_queue; + n_tx_queue++; + } + qconf->rx_queue_list[qconf->n_rx_queue] = portid; + qconf->n_rx_queue++; + printf("Lcore %u: RX port %u TX queue %u\n", + rx_lcore_id, portid, qconf->tx_queue_id); + } + + /* Initialise each port */ + for (portid = 0; portid < nb_ports; portid++) { + + /* skip ports that are not enabled */ + if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) { + printf("Skipping disabled port %u\n", portid); + continue; + } + /* init port */ + printf("Initializing port %u... ", portid); + fflush(stdout); + ret = rte_eth_dev_configure((uint8_t) portid, 1, + (uint16_t) n_tx_queue, &port_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "Cannot configure device: " + "err=%d, port=%u\n", + ret, portid); + + rte_eth_macaddr_get((uint8_t) portid, + &l2fwd_ports_eth_addr[portid]); + + /* init one RX queue */ + fflush(stdout); + ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd, + SOCKET0, &rx_conf, + l2fwd_pktmbuf_pool); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: " + "err=%d, port=%u\n", + ret, portid); + + /* init one TX queue logical core on each port */ + for (queueid = 0; queueid < n_tx_queue; queueid++) { + fflush(stdout); + ret = rte_eth_tx_queue_setup((uint8_t) portid, + (uint16_t) queueid, nb_txd, + SOCKET0, &tx_conf); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: " + "err=%d, port=%u queue=%u\n", + ret, portid, queueid); + } + + /* Start device */ + ret = rte_eth_dev_start((uint8_t) portid); + if (ret < 0) + rte_exit(EXIT_FAILURE, "rte_eth_dev_start: " + "err=%d, port=%u\n", + ret, portid); + + printf("done: "); + + /* get link status */ + rte_eth_link_get((uint8_t) portid, &link); + if (link.link_status) { + printf(" Link Up - speed %u Mbps - %s\n", + (unsigned) link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? 
+ ("full-duplex") : ("half-duplex\n")); + } else { + printf(" Link Down\n"); + } + + rte_eth_promiscuous_enable((uint8_t)portid); + + printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n", + portid, + l2fwd_ports_eth_addr[portid].addr_bytes[0], + l2fwd_ports_eth_addr[portid].addr_bytes[1], + l2fwd_ports_eth_addr[portid].addr_bytes[2], + l2fwd_ports_eth_addr[portid].addr_bytes[3], + l2fwd_ports_eth_addr[portid].addr_bytes[4], + l2fwd_ports_eth_addr[portid].addr_bytes[5]); + + /* initialize port stats */ + memset(&port_statistics, 0, sizeof(port_statistics)); + } + + /* launch per-lcore init on every lcore */ + rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER); + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (rte_eal_wait_lcore(lcore_id) < 0) + return -1; + } + + return 0; +} diff --git a/examples/l2fwd/main.h b/examples/l2fwd/main.h new file mode 100644 index 0000000000..6027cb5455 --- /dev/null +++ b/examples/l2fwd/main.h @@ -0,0 +1,47 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * version: DPDK.L.1.2.3-3 + */ + +#ifndef _MAIN_H_ +#define _MAIN_H_ + +#ifdef RTE_EXEC_ENV_BAREMETAL +#define MAIN _main +#else +#define MAIN main +#endif + +int MAIN(int argc, char **argv); + +#endif /* _MAIN_H_ */ diff --git a/examples/l3fwd-vf/496040_L3Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf b/examples/l3fwd-vf/496040_L3Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf new file mode 100644 index 0000000000000000000000000000000000000000..4cea1255c1ab6a98e9028141db6d428257eb60b2 GIT binary patch literal 52089 zcmcG!QhqX`J_u+qtXc6-v z+8n))9&?CXQA~oKnSleIeE(o(0Um~hi=Bvx$lllro{x`F!qUbC;KV3lW8?x51DM#G z0{&WZa&{r&W@3~B*qOUn5OJ|FGyWiAW@c3NbO10a7+a~h*a`?ZyEp-iY~f)nwwu*$ zt#O&*zG%PN2iEW3VPk}{>9-6oCm39UXA$-HTOchNHa%y*-lgxzeYLtu0E1ZLNp#~+ z_qrtVhB?xcnGQr?sLKHYE3lVZ1AtEWuC<|H1_G6DR%{_m|` z>2I5?Gljp>=b+#Y+}7M4d8SbrykybGwR-xdC0*mN`6XKl2%^mAYgw?$RHS1AkX_{Y z(YBNyQGC{0?s<;1T;J@JqnGg-oE&k zQ@->tWwlp~IDQhdW2QGBdf*>! 